diff --git a/libraries/sqlite/unix/build/.libs/libsqlite3.a b/libraries/sqlite/unix/build/.libs/libsqlite3.a new file mode 100644 index 0000000000..47e9121815 Binary files /dev/null and b/libraries/sqlite/unix/build/.libs/libsqlite3.a differ diff --git a/libraries/sqlite/unix/build/.libs/libsqlite3.lai b/libraries/sqlite/unix/build/.libs/libsqlite3.lai new file mode 100644 index 0000000000..3fcb50d337 --- /dev/null +++ b/libraries/sqlite/unix/build/.libs/libsqlite3.lai @@ -0,0 +1,35 @@ +# libsqlite3.la - a libtool library file +# Generated by ltmain.sh - GNU libtool 1.5.22 (1.1220.2.365 2005/12/18 22:14:06) +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# The name that we can dlopen(3). +dlname='libsqlite3.so.0' + +# Names of this library. +library_names='libsqlite3.so.0.8.6 libsqlite3.so.0 libsqlite3.so' + +# The name of the static archive. +old_library='libsqlite3.a' + +# Libraries that this one depends upon. +dependency_libs=' -lpthread' + +# Version information for libsqlite3. +current=8 +age=8 +revision=6 + +# Is this an already installed library? +installed=yes + +# Should we warn about portability when linking against -modules? +shouldnotlink=no + +# Files to dlopen/dlpreopen +dlopen='' +dlpreopen='' + +# Directory that this library needs to be installed in: +libdir='/usr/local/lib' diff --git a/libraries/sqlite/unix/build/.libs/libsqlite3.so b/libraries/sqlite/unix/build/.libs/libsqlite3.so new file mode 120000 index 0000000000..026b67c3ab --- /dev/null +++ b/libraries/sqlite/unix/build/.libs/libsqlite3.so @@ -0,0 +1 @@ +libsqlite3.so.0.8.6 \ No newline at end of file diff --git a/libraries/sqlite/unix/build/.libs/libsqlite3.so.0 b/libraries/sqlite/unix/build/.libs/libsqlite3.so.0 new file mode 120000 index 0000000000..026b67c3ab --- /dev/null +++ b/libraries/sqlite/unix/build/.libs/libsqlite3.so.0 @@ -0,0 +1 @@ +libsqlite3.so.0.8.6 \ No newline at end of file diff --git a/libraries/sqlite/unix/build/.libs/libsqlite3.so.0.8.6 b/libraries/sqlite/unix/build/.libs/libsqlite3.so.0.8.6 new file mode 100755 index 0000000000..fbfd5e4cd5 Binary files /dev/null and b/libraries/sqlite/unix/build/.libs/libsqlite3.so.0.8.6 differ diff --git a/libraries/sqlite/unix/build/.libs/sqlite3 b/libraries/sqlite/unix/build/.libs/sqlite3 new file mode 100755 index 0000000000..8154d1033e Binary files /dev/null and b/libraries/sqlite/unix/build/.libs/sqlite3 differ diff --git a/libraries/sqlite/unix/build/Makefile b/libraries/sqlite/unix/build/Makefile new file mode 100644 index 0000000000..e3b6a4509d --- /dev/null +++ b/libraries/sqlite/unix/build/Makefile @@ -0,0 +1,775 @@ +#!/usr/make +# +# Makefile for SQLITE +# +# This makefile is suppose to be configured automatically using the +# autoconf. But if that does not work for you, you can configure +# the makefile manually. Just set the parameters below to values that +# work well for your system. +# +# If the configure script does not work out-of-the-box, you might +# be able to get it to work by giving it some hints. See the comment +# at the beginning of configure.in for additional information. +# + +# The toplevel directory of the source tree. This is the directory +# that contains this "Makefile.in" and the "configure.in" script. +# +TOP = ../sqlite-3.5.1 + +# C Compiler and options for use in building executables that +# will run on the platform that is doing the build. +# +BCC = gcc -g -O2 + +# C Compile and options for use in building executables that +# will run on the target platform. 
(BCC and TCC are usually the +# same unless your are cross-compiling.) +# +TCC = gcc -g -O2 -I. -I${TOP}/src + +# Define -DNDEBUG to compile without debugging (i.e., for production usage) +# Omitting the define will cause extra debugging code to be inserted and +# includes extra comments when "EXPLAIN stmt" is used. +# +TCC += -DNDEBUG + +# Compiler options needed for programs that use the TCL library. +# +TCC += + +# The library that programs using TCL must link against. +# +LIBTCL = + +# Compiler options needed for programs that use the readline() library. +# +READLINE_FLAGS = -DHAVE_READLINE=0 + +# The library that programs using readline() must link against. +# +LIBREADLINE = + +# Should the database engine be compiled threadsafe +# +TCC += -DSQLITE_THREADSAFE=1 + +# The pthreads library if needed +# +LIBPTHREAD=-lpthread + +# Do threads override each others locks by default (1), or do we test (-1) +# +TCC += -DSQLITE_THREAD_OVERRIDE_LOCK=-1 + +# The fdatasync library +TLIBS = + +# Flags controlling use of the in memory btree implementation +# +# TEMP_STORE is 0 to force temporary tables to be in a file, 1 to +# default to file, 2 to default to memory, and 3 to force temporary +# tables to always be in memory. +# +TEMP_STORE = -DTEMP_STORE=1 + +# Version numbers and release number for the SQLite being compiled. +# +VERSION = 3.5 +VERSION_NUMBER = 3005001 +RELEASE = 3.5.1 + +# Filename extensions +# +BEXE = +TEXE = + +# The following variable is "1" if the configure script was able to locate +# the tclConfig.sh file. It is an empty string otherwise. When this +# variable is "1", the TCL extension library (libtclsqlite3.so) is built +# and installed. +# +HAVE_TCL = + +# The suffix used on shared libraries. Ex: ".dll", ".so", ".dylib" +# +SHLIB_SUFFIX = @TCL_SHLIB_SUFFIX@ + +# The directory into which to store package information for + +# Some standard variables and programs +# +prefix = /usr/local +exec_prefix = ${prefix} +libdir = ${exec_prefix}/lib +INSTALL = /usr/bin/install -c +LIBTOOL = ./libtool +ALLOWRELEASE = + +# libtool compile/link/install +LTCOMPILE = $(LIBTOOL) --mode=compile --tag=CC $(TCC) +LTLINK = $(LIBTOOL) --mode=link $(TCC) +LTINSTALL = $(LIBTOOL) --mode=install $(INSTALL) + +# nawk compatible awk. +NAWK = mawk + +# You should not have to change anything below this line +############################################################################### +TCC += -DSQLITE_OMIT_LOAD_EXTENSION=1 + +# Object files for the SQLite library. +# +LIBOBJ = alter.lo analyze.lo attach.lo auth.lo btmutex.lo btree.lo build.lo \ + callback.lo complete.lo date.lo \ + delete.lo expr.lo func.lo hash.lo journal.lo insert.lo loadext.lo \ + main.lo malloc.lo mem1.lo mem2.lo mutex.lo \ + mutex_os2.lo mutex_unix.lo mutex_w32.lo \ + opcodes.lo os.lo os_unix.lo os_win.lo os_os2.lo \ + pager.lo parse.lo pragma.lo prepare.lo printf.lo random.lo \ + select.lo table.lo tokenize.lo trigger.lo update.lo \ + util.lo vacuum.lo \ + vdbe.lo vdbeapi.lo vdbeaux.lo vdbeblob.lo vdbefifo.lo vdbemem.lo \ + where.lo utf.lo legacy.lo vtab.lo + +# All of the source code files. 
+# +SRC = \ + $(TOP)/src/alter.c \ + $(TOP)/src/analyze.c \ + $(TOP)/src/attach.c \ + $(TOP)/src/auth.c \ + $(TOP)/src/btmutex.c \ + $(TOP)/src/btree.c \ + $(TOP)/src/btree.h \ + $(TOP)/src/build.c \ + $(TOP)/src/callback.c \ + $(TOP)/src/complete.c \ + $(TOP)/src/date.c \ + $(TOP)/src/delete.c \ + $(TOP)/src/expr.c \ + $(TOP)/src/func.c \ + $(TOP)/src/hash.c \ + $(TOP)/src/hash.h \ + $(TOP)/src/insert.c \ + $(TOP)/src/journal.c \ + $(TOP)/src/legacy.c \ + $(TOP)/src/loadext.c \ + $(TOP)/src/main.c \ + $(TOP)/src/malloc.c \ + $(TOP)/src/mem1.c \ + $(TOP)/src/mem2.c \ + $(TOP)/src/mutex.c \ + $(TOP)/src/mutex_os2.c \ + $(TOP)/src/mutex_unix.c \ + $(TOP)/src/mutex_w32.c \ + $(TOP)/src/os.c \ + $(TOP)/src/os_unix.c \ + $(TOP)/src/os_win.c \ + $(TOP)/src/os_os2.c \ + $(TOP)/src/pager.c \ + $(TOP)/src/pager.h \ + $(TOP)/src/parse.y \ + $(TOP)/src/pragma.c \ + $(TOP)/src/prepare.c \ + $(TOP)/src/printf.c \ + $(TOP)/src/random.c \ + $(TOP)/src/select.c \ + $(TOP)/src/shell.c \ + $(TOP)/src/sqlite.h.in \ + $(TOP)/src/sqliteInt.h \ + $(TOP)/src/table.c \ + $(TOP)/src/tclsqlite.c \ + $(TOP)/src/tokenize.c \ + $(TOP)/src/trigger.c \ + $(TOP)/src/utf.c \ + $(TOP)/src/update.c \ + $(TOP)/src/util.c \ + $(TOP)/src/vacuum.c \ + $(TOP)/src/vdbe.c \ + $(TOP)/src/vdbe.h \ + $(TOP)/src/vdbeapi.c \ + $(TOP)/src/vdbeaux.c \ + $(TOP)/src/vdbeblob.c \ + $(TOP)/src/vdbefifo.c \ + $(TOP)/src/vdbemem.c \ + $(TOP)/src/vdbeInt.h \ + $(TOP)/src/vtab.c \ + $(TOP)/src/where.c + +# Source code for extensions +# +SRC += \ + $(TOP)/ext/fts1/fts1.c \ + $(TOP)/ext/fts1/fts1.h \ + $(TOP)/ext/fts1/fts1_hash.c \ + $(TOP)/ext/fts1/fts1_hash.h \ + $(TOP)/ext/fts1/fts1_porter.c \ + $(TOP)/ext/fts1/fts1_tokenizer.h \ + $(TOP)/ext/fts1/fts1_tokenizer1.c + + +# Source code to the test files. +# +TESTSRC = \ + $(TOP)/src/attach.c \ + $(TOP)/src/btree.c \ + $(TOP)/src/build.c \ + $(TOP)/src/date.c \ + $(TOP)/src/expr.c \ + $(TOP)/src/func.c \ + $(TOP)/src/insert.c \ + $(TOP)/src/malloc.c \ + $(TOP)/src/os.c \ + $(TOP)/src/os_os2.c \ + $(TOP)/src/os_unix.c \ + $(TOP)/src/os_win.c \ + $(TOP)/src/pager.c \ + $(TOP)/src/pragma.c \ + $(TOP)/src/prepare.c \ + $(TOP)/src/printf.c \ + $(TOP)/src/select.c \ + $(TOP)/src/test1.c \ + $(TOP)/src/test2.c \ + $(TOP)/src/test3.c \ + $(TOP)/src/test4.c \ + $(TOP)/src/test5.c \ + $(TOP)/src/test6.c \ + $(TOP)/src/test7.c \ + $(TOP)/src/test8.c \ + $(TOP)/src/test9.c \ + $(TOP)/src/test_autoext.c \ + $(TOP)/src/test_async.c \ + $(TOP)/src/test_btree.c \ + $(TOP)/src/test_config.c \ + $(TOP)/src/test_hexio.c \ + $(TOP)/src/test_malloc.c \ + $(TOP)/src/test_md5.c \ + $(TOP)/src/test_schema.c \ + $(TOP)/src/test_server.c \ + $(TOP)/src/test_tclvar.c \ + $(TOP)/src/tokenize.c \ + $(TOP)/src/utf.c \ + $(TOP)/src/util.c \ + $(TOP)/src/vdbe.c \ + $(TOP)/src/vdbeapi.c \ + $(TOP)/src/vdbeaux.c \ + $(TOP)/src/vdbemem.c \ + $(TOP)/src/where.c \ + parse.c + +# Header files used by all library source files. +# +HDR = \ + sqlite3.h \ + $(TOP)/src/btree.h \ + $(TOP)/src/btreeInt.h \ + $(TOP)/src/hash.h \ + $(TOP)/src/sqliteLimit.h \ + $(TOP)/src/mutex.h \ + opcodes.h \ + $(TOP)/src/os.h \ + $(TOP)/src/os_common.h \ + $(TOP)/src/sqlite3ext.h \ + $(TOP)/src/sqliteInt.h \ + $(TOP)/src/vdbe.h \ + parse.h + +# Header files used by extensions +# +HDR += \ + $(TOP)/ext/fts1/fts1.h \ + $(TOP)/ext/fts1/fts1_hash.h \ + $(TOP)/ext/fts1/fts1_tokenizer.h + +# Header files used by the VDBE submodule +# +VDBEHDR = \ + $(HDR) \ + $(TOP)/src/vdbeInt.h + +# This is the default Makefile target. 
The objects listed here +# are what get build when you type just "make" with no arguments. +# +all: sqlite3.h libsqlite3.la sqlite3$(TEXE) $(HAVE_TCL:1=libtclsqlite3.la) + +Makefile: $(TOP)/Makefile.in + ./config.status + +# Generate the file "last_change" which contains the date of change +# of the most recently modified source code file +# +last_change: $(SRC) + cat $(SRC) | grep '$$Id: ' | sort -k 5 | tail -1 \ + | $(NAWK) '{print $$5,$$6}' >last_change + +libsqlite3.la: $(LIBOBJ) + $(LTLINK) -o libsqlite3.la $(LIBOBJ) $(LIBPTHREAD) \ + ${ALLOWRELEASE} -rpath $(libdir) -version-info "8:6:8" + +libtclsqlite3.la: tclsqlite.lo libsqlite3.la + $(LTLINK) -o libtclsqlite3.la tclsqlite.lo \ + $(LIBOBJ) $(LIBPTHREAD) \ + -rpath $(libdir)/sqlite \ + -version-info "8:6:8" + +sqlite3$(TEXE): $(TOP)/src/shell.c libsqlite3.la sqlite3.h + $(LTLINK) $(READLINE_FLAGS) $(LIBPTHREAD) \ + -o $@ $(TOP)/src/shell.c libsqlite3.la \ + $(LIBREADLINE) $(TLIBS) + +# This target creates a directory named "tsrc" and fills it with +# copies of all of the C source code and header files needed to +# build on the target system. Some of the C source code and header +# files are automatically generated. This target takes care of +# all that automatic generation. +# +target_source: $(SRC) parse.c opcodes.c keywordhash.h $(VDBEHDR) + rm -rf tsrc + mkdir -p tsrc + cp $(SRC) $(VDBEHDR) tsrc + rm tsrc/sqlite.h.in tsrc/parse.y + cp parse.c opcodes.c keywordhash.h tsrc + +sqlite3.c: target_source $(TOP)/tool/mksqlite3c.tcl + tclsh $(TOP)/tool/mksqlite3c.tcl + +# Rules to build the LEMON compiler generator +# +lemon$(BEXE): $(TOP)/tool/lemon.c $(TOP)/tool/lempar.c + $(BCC) -o lemon$(BEXE) $(TOP)/tool/lemon.c + cp $(TOP)/tool/lempar.c . + + +# Rules to build individual files +# +alter.lo: $(TOP)/src/alter.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/alter.c + +analyze.lo: $(TOP)/src/analyze.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/analyze.c + +attach.lo: $(TOP)/src/attach.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/attach.c + +auth.lo: $(TOP)/src/auth.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/auth.c + +btmutex.lo: $(TOP)/src/btmutex.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/btmutex.c + +btree.lo: $(TOP)/src/btree.c $(HDR) $(TOP)/src/pager.h + $(LTCOMPILE) -c $(TOP)/src/btree.c + +build.lo: $(TOP)/src/build.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/build.c + +callback.lo: $(TOP)/src/callback.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/callback.c + +complete.lo: $(TOP)/src/complete.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/complete.c + +date.lo: $(TOP)/src/date.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/date.c + +delete.lo: $(TOP)/src/delete.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/delete.c + +expr.lo: $(TOP)/src/expr.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/expr.c + +func.lo: $(TOP)/src/func.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/func.c + +hash.lo: $(TOP)/src/hash.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/hash.c + +insert.lo: $(TOP)/src/insert.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/insert.c + +journal.lo: $(TOP)/src/journal.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/journal.c + +legacy.lo: $(TOP)/src/legacy.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/legacy.c + +loadext.lo: $(TOP)/src/loadext.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/loadext.c + +main.lo: $(TOP)/src/main.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/main.c + +malloc.lo: $(TOP)/src/malloc.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/malloc.c + +mem1.lo: $(TOP)/src/mem1.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mem1.c + +mem2.lo: $(TOP)/src/mem2.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mem2.c + 
+mutex.lo: $(TOP)/src/mutex.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mutex.c + +mutex_os2.lo: $(TOP)/src/mutex_os2.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mutex_os2.c + +mutex_unix.lo: $(TOP)/src/mutex_unix.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mutex_unix.c + +mutex_w32.lo: $(TOP)/src/mutex_w32.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mutex_w32.c + +pager.lo: $(TOP)/src/pager.c $(HDR) $(TOP)/src/pager.h + $(LTCOMPILE) -c $(TOP)/src/pager.c + +opcodes.lo: opcodes.c + $(LTCOMPILE) -c opcodes.c + +opcodes.c: opcodes.h $(TOP)/mkopcodec.awk + sort -n -b -k 3 opcodes.h | $(NAWK) -f $(TOP)/mkopcodec.awk >opcodes.c + +opcodes.h: parse.h $(TOP)/src/vdbe.c $(TOP)/mkopcodeh.awk + cat parse.h $(TOP)/src/vdbe.c | $(NAWK) -f $(TOP)/mkopcodeh.awk >opcodes.h + +os.lo: $(TOP)/src/os.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/os.c + +os_unix.lo: $(TOP)/src/os_unix.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/os_unix.c + +os_win.lo: $(TOP)/src/os_win.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/os_win.c + +os_os2.lo: $(TOP)/src/os_os2.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/os_os2.c + +parse.lo: parse.c $(HDR) + $(LTCOMPILE) -c parse.c + +parse.h: parse.c + +parse.c: $(TOP)/src/parse.y lemon$(BEXE) $(TOP)/addopcodes.awk + cp $(TOP)/src/parse.y . + ./lemon$(BEXE) $(OPTS) parse.y + mv parse.h parse.h.temp + $(NAWK) -f $(TOP)/addopcodes.awk parse.h.temp >parse.h + +pragma.lo: $(TOP)/src/pragma.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/pragma.c + +prepare.lo: $(TOP)/src/prepare.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/prepare.c + +printf.lo: $(TOP)/src/printf.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/printf.c + +random.lo: $(TOP)/src/random.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/random.c + +select.lo: $(TOP)/src/select.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/select.c + +sqlite3.h: $(TOP)/src/sqlite.h.in + sed -e s/--VERS--/$(RELEASE)/ $(TOP)/src/sqlite.h.in | \ + sed -e s/--VERSION-NUMBER--/$(VERSION_NUMBER)/ >sqlite3.h + +table.lo: $(TOP)/src/table.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/table.c + +tclsqlite.lo: $(TOP)/src/tclsqlite.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/tclsqlite.c + +tokenize.lo: $(TOP)/src/tokenize.c keywordhash.h $(HDR) + $(LTCOMPILE) -c $(TOP)/src/tokenize.c + +keywordhash.h: $(TOP)/tool/mkkeywordhash.c + $(BCC) -o mkkeywordhash$(BEXE) $(OPTS) $(TOP)/tool/mkkeywordhash.c + ./mkkeywordhash$(BEXE) >keywordhash.h + +trigger.lo: $(TOP)/src/trigger.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/trigger.c + +update.lo: $(TOP)/src/update.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/update.c + +utf.lo: $(TOP)/src/utf.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/utf.c + +util.lo: $(TOP)/src/util.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/util.c + +vacuum.lo: $(TOP)/src/vacuum.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/vacuum.c + +vdbe.lo: $(TOP)/src/vdbe.c $(VDBEHDR) + $(LTCOMPILE) -c $(TOP)/src/vdbe.c + +vdbeapi.lo: $(TOP)/src/vdbeapi.c $(VDBEHDR) + $(LTCOMPILE) -c $(TOP)/src/vdbeapi.c + +vdbeaux.lo: $(TOP)/src/vdbeaux.c $(VDBEHDR) + $(LTCOMPILE) -c $(TOP)/src/vdbeaux.c + +vdbeblob.lo: $(TOP)/src/vdbeblob.c $(VDBEHDR) + $(LTCOMPILE) -c $(TOP)/src/vdbeblob.c + +vdbefifo.lo: $(TOP)/src/vdbefifo.c $(VDBEHDR) + $(LTCOMPILE) -c $(TOP)/src/vdbefifo.c + +vdbemem.lo: $(TOP)/src/vdbemem.c $(VDBEHDR) + $(LTCOMPILE) -c $(TOP)/src/vdbemem.c + +vtab.lo: $(TOP)/src/vtab.c $(VDBEHDR) + $(LTCOMPILE) -c $(TOP)/src/vtab.c + +where.lo: $(TOP)/src/where.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/where.c + +tclsqlite-shell.lo: $(TOP)/src/tclsqlite.c $(HDR) + $(LTCOMPILE) -DTCLSH=1 -o $@ -c $(TOP)/src/tclsqlite.c + 
+tclsqlite-stubs.lo: $(TOP)/src/tclsqlite.c $(HDR) + $(LTCOMPILE) -DTCL_USE_STUBS=1 -o $@ -c $(TOP)/src/tclsqlite.c + +tclsqlite3: tclsqlite-shell.lo libsqlite3.la + $(LTLINK) -o tclsqlite3 tclsqlite-shell.lo \ + libsqlite3.la $(LIBTCL) + +testfixture$(TEXE): $(TOP)/src/tclsqlite.c libsqlite3.la $(TESTSRC) + $(LTLINK) -DTCLSH=1 -DSQLITE_TEST=1 -DSQLITE_CRASH_TEST=1 \ + -DSQLITE_NO_SYNC=1 $(TEMP_STORE) \ + -o testfixture $(TESTSRC) $(TOP)/src/tclsqlite.c \ + libsqlite3.la $(LIBTCL) + + +fulltest: testfixture$(TEXE) sqlite3$(TEXE) + ./testfixture $(TOP)/test/all.test + +test: testfixture$(TEXE) sqlite3$(TEXE) + ./testfixture $(TOP)/test/quick.test + +sqlite3_analyzer$(TEXE): $(TOP)/src/tclsqlite.c libtclsqlite3.la \ + $(TESTSRC) $(TOP)/tool/spaceanal.tcl + sed \ + -e '/^#/d' \ + -e 's,\\,\\\\,g' \ + -e 's,",\\",g' \ + -e 's,^,",' \ + -e 's,$$,\\n",' \ + $(TOP)/tool/spaceanal.tcl >spaceanal_tcl.h + $(LTLINK) -DTCLSH=2 -DSQLITE_TEST=1 $(TEMP_STORE)\ + -o sqlite3_analyzer$(EXE) $(TESTSRC) $(TOP)/src/tclsqlite.c \ + libtclsqlite3.la $(LIBTCL) + +# Rules used to build documentation +# +arch.html: $(TOP)/www/arch.tcl + tclsh $(TOP)/www/arch.tcl >arch.html + +arch2.gif: $(TOP)/www/arch2.gif + cp $(TOP)/www/arch2.gif . + +autoinc.html: $(TOP)/www/autoinc.tcl + tclsh $(TOP)/www/autoinc.tcl >autoinc.html + +c_interface.html: $(TOP)/www/c_interface.tcl + tclsh $(TOP)/www/c_interface.tcl >c_interface.html + +capi3.html: $(TOP)/www/capi3.tcl + tclsh $(TOP)/www/capi3.tcl >capi3.html + +capi3ref.html: $(TOP)/www/mkapidoc.tcl sqlite3.h + tclsh $(TOP)/www/mkapidoc.tcl capi3ref.html + +changes.html: $(TOP)/www/changes.tcl + tclsh $(TOP)/www/changes.tcl >changes.html + +compile.html: $(TOP)/www/compile.tcl + tclsh $(TOP)/www/compile.tcl >compile.html + +copyright.html: $(TOP)/www/copyright.tcl + tclsh $(TOP)/www/copyright.tcl >copyright.html + +copyright-release.html: $(TOP)/www/copyright-release.html + cp $(TOP)/www/copyright-release.html . + +copyright-release.pdf: $(TOP)/www/copyright-release.pdf + cp $(TOP)/www/copyright-release.pdf . + +common.tcl: $(TOP)/www/common.tcl + cp $(TOP)/www/common.tcl . 
+ +conflict.html: $(TOP)/www/conflict.tcl + tclsh $(TOP)/www/conflict.tcl >conflict.html + +datatypes.html: $(TOP)/www/datatypes.tcl + tclsh $(TOP)/www/datatypes.tcl >datatypes.html + +datatype3.html: $(TOP)/www/datatype3.tcl + tclsh $(TOP)/www/datatype3.tcl >datatype3.html + +docs.html: $(TOP)/www/docs.tcl + tclsh $(TOP)/www/docs.tcl >docs.html + +download.html: $(TOP)/www/download.tcl + mkdir -p doc + tclsh $(TOP)/www/download.tcl >download.html + +faq.html: $(TOP)/www/faq.tcl + tclsh $(TOP)/www/faq.tcl >faq.html + +fileformat.html: $(TOP)/www/fileformat.tcl + tclsh $(TOP)/www/fileformat.tcl >fileformat.html + +formatchng.html: $(TOP)/www/formatchng.tcl + tclsh $(TOP)/www/formatchng.tcl >formatchng.html + +index.html: $(TOP)/www/index.tcl last_change + tclsh $(TOP)/www/index.tcl >index.html + +limits.html: $(TOP)/www/limits.tcl last_change + tclsh $(TOP)/www/limits.tcl >limits.html + +lang.html: $(TOP)/www/lang.tcl + tclsh $(TOP)/www/lang.tcl >lang.html + +pragma.html: $(TOP)/www/pragma.tcl + tclsh $(TOP)/www/pragma.tcl >pragma.html + +lockingv3.html: $(TOP)/www/lockingv3.tcl + tclsh $(TOP)/www/lockingv3.tcl >lockingv3.html + +oldnews.html: $(TOP)/www/oldnews.tcl + tclsh $(TOP)/www/oldnews.tcl >oldnews.html + +omitted.html: $(TOP)/www/omitted.tcl + tclsh $(TOP)/www/omitted.tcl >omitted.html + +opcode.html: $(TOP)/www/opcode.tcl $(TOP)/src/vdbe.c + tclsh $(TOP)/www/opcode.tcl $(TOP)/src/vdbe.c >opcode.html + +mingw.html: $(TOP)/www/mingw.tcl + tclsh $(TOP)/www/mingw.tcl >mingw.html + +nulls.html: $(TOP)/www/nulls.tcl + tclsh $(TOP)/www/nulls.tcl >nulls.html + +quickstart.html: $(TOP)/www/quickstart.tcl + tclsh $(TOP)/www/quickstart.tcl >quickstart.html + +speed.html: $(TOP)/www/speed.tcl + tclsh $(TOP)/www/speed.tcl >speed.html + +sqlite.gif: $(TOP)/art/SQLite.gif + cp $(TOP)/art/SQLite.gif sqlite.gif + +sqlite.html: $(TOP)/www/sqlite.tcl + tclsh $(TOP)/www/sqlite.tcl >sqlite.html + +support.html: $(TOP)/www/support.tcl + tclsh $(TOP)/www/support.tcl >support.html + +tclsqlite.html: $(TOP)/www/tclsqlite.tcl + tclsh $(TOP)/www/tclsqlite.tcl >tclsqlite.html + +vdbe.html: $(TOP)/www/vdbe.tcl + tclsh $(TOP)/www/vdbe.tcl >vdbe.html + +version3.html: $(TOP)/www/version3.tcl + tclsh $(TOP)/www/version3.tcl >version3.html + + +# Files to be published on the website. 
+# +DOC = \ + arch.html \ + arch2.gif \ + autoinc.html \ + c_interface.html \ + capi3.html \ + capi3ref.html \ + changes.html \ + compile.html \ + copyright.html \ + copyright-release.html \ + copyright-release.pdf \ + conflict.html \ + datatypes.html \ + datatype3.html \ + docs.html \ + download.html \ + faq.html \ + fileformat.html \ + formatchng.html \ + index.html \ + lang.html \ + limits.html \ + lockingv3.html \ + mingw.html \ + nulls.html \ + oldnews.html \ + omitted.html \ + opcode.html \ + pragma.html \ + quickstart.html \ + speed.html \ + sqlite.gif \ + sqlite.html \ + support.html \ + tclsqlite.html \ + vdbe.html \ + version3.html + +doc: common.tcl $(DOC) + mkdir -p doc + mv $(DOC) doc + +install: sqlite3 libsqlite3.la sqlite3.h ${HAVE_TCL:1=tcl_install} + $(INSTALL) -d $(DESTDIR)$(libdir) + $(LTINSTALL) libsqlite3.la $(DESTDIR)$(libdir) + $(INSTALL) -d $(DESTDIR)$(exec_prefix)/bin + $(LTINSTALL) sqlite3 $(DESTDIR)$(exec_prefix)/bin + $(INSTALL) -d $(DESTDIR)$(prefix)/include + $(INSTALL) -m 0644 sqlite3.h $(DESTDIR)$(prefix)/include + $(INSTALL) -m 0644 $(TOP)/src/sqlite3ext.h $(DESTDIR)$(prefix)/include + $(INSTALL) -d $(DESTDIR)$(libdir)/pkgconfig; + $(INSTALL) -m 0644 sqlite3.pc $(DESTDIR)$(libdir)/pkgconfig; + +tcl_install: libtclsqlite3.la + tclsh $(TOP)/tclinstaller.tcl $(VERSION) + +clean: + rm -f *.lo *.la *.o sqlite3$(TEXE) libsqlite3.la + rm -f sqlite3.h opcodes.* + rm -rf .libs .deps + rm -f lemon$(BEXE) lempar.c parse.* sqlite*.tar.gz + rm -f mkkeywordhash$(BEXE) keywordhash.h + rm -f $(PUBLISH) + rm -f *.da *.bb *.bbg gmon.out + rm -f testfixture$(TEXE) test.db + rm -rf doc + rm -f common.tcl + rm -f sqlite3.dll sqlite3.lib sqlite3.def + +distclean: clean + rm -f config.log config.status libtool Makefile config.h sqlite3.pc + +# +# Windows section +# +dll: sqlite3.dll + +REAL_LIBOBJ = $(LIBOBJ:%.lo=.libs/%.o) + +$(REAL_LIBOBJ): $(LIBOBJ) + +sqlite3.def: $(REAL_LIBOBJ) + echo 'EXPORTS' >sqlite3.def + nm $(REAL_LIBOBJ) | grep ' T ' | grep ' _sqlite3_' \ + | sed 's/^.* _//' >>sqlite3.def + +sqlite3.dll: $(REAL_LIBOBJ) sqlite3.def + $(TCC) -shared -o sqlite3.dll sqlite3.def \ + -Wl,"--strip-all" $(REAL_LIBOBJ) diff --git a/libraries/sqlite/unix/opensim_build_notes.txt b/libraries/sqlite/unix/opensim_build_notes.txt new file mode 100644 index 0000000000..370d2bd916 --- /dev/null +++ b/libraries/sqlite/unix/opensim_build_notes.txt @@ -0,0 +1,6 @@ +downloaded tarball from: +http://www.sqlite.org/sqlite-3.5.1.tar.gz + +build: +see README +instead of install, you can copy build/.libs/libsqlite3.so.0.8.6 to bin/libsqlite3.so.0 diff --git a/libraries/sqlite/unix/sqlite-3.5.1/Makefile.in b/libraries/sqlite/unix/sqlite-3.5.1/Makefile.in new file mode 100644 index 0000000000..5f7d040105 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/Makefile.in @@ -0,0 +1,775 @@ +#!/usr/make +# +# Makefile for SQLITE +# +# This makefile is suppose to be configured automatically using the +# autoconf. But if that does not work for you, you can configure +# the makefile manually. Just set the parameters below to values that +# work well for your system. +# +# If the configure script does not work out-of-the-box, you might +# be able to get it to work by giving it some hints. See the comment +# at the beginning of configure.in for additional information. +# + +# The toplevel directory of the source tree. This is the directory +# that contains this "Makefile.in" and the "configure.in" script. 
+# +TOP = @srcdir@ + +# C Compiler and options for use in building executables that +# will run on the platform that is doing the build. +# +BCC = @BUILD_CC@ @BUILD_CFLAGS@ + +# C Compile and options for use in building executables that +# will run on the target platform. (BCC and TCC are usually the +# same unless your are cross-compiling.) +# +TCC = @CC@ @CFLAGS@ -I. -I${TOP}/src + +# Define -DNDEBUG to compile without debugging (i.e., for production usage) +# Omitting the define will cause extra debugging code to be inserted and +# includes extra comments when "EXPLAIN stmt" is used. +# +TCC += @TARGET_DEBUG@ @XTHREADCONNECT@ + +# Compiler options needed for programs that use the TCL library. +# +TCC += @TCL_INCLUDE_SPEC@ + +# The library that programs using TCL must link against. +# +LIBTCL = @TCL_LIB_SPEC@ @TCL_LIBS@ + +# Compiler options needed for programs that use the readline() library. +# +READLINE_FLAGS = -DHAVE_READLINE=@TARGET_HAVE_READLINE@ @TARGET_READLINE_INC@ + +# The library that programs using readline() must link against. +# +LIBREADLINE = @TARGET_READLINE_LIBS@ + +# Should the database engine be compiled threadsafe +# +TCC += -DSQLITE_THREADSAFE=@SQLITE_THREADSAFE@ + +# The pthreads library if needed +# +LIBPTHREAD=@TARGET_THREAD_LIB@ + +# Do threads override each others locks by default (1), or do we test (-1) +# +TCC += -DSQLITE_THREAD_OVERRIDE_LOCK=@THREADSOVERRIDELOCKS@ + +# The fdatasync library +TLIBS = @LIBS@ + +# Flags controlling use of the in memory btree implementation +# +# TEMP_STORE is 0 to force temporary tables to be in a file, 1 to +# default to file, 2 to default to memory, and 3 to force temporary +# tables to always be in memory. +# +TEMP_STORE = -DTEMP_STORE=@TEMP_STORE@ + +# Version numbers and release number for the SQLite being compiled. +# +VERSION = @VERSION@ +VERSION_NUMBER = @VERSION_NUMBER@ +RELEASE = @RELEASE@ + +# Filename extensions +# +BEXE = @BUILD_EXEEXT@ +TEXE = @TARGET_EXEEXT@ + +# The following variable is "1" if the configure script was able to locate +# the tclConfig.sh file. It is an empty string otherwise. When this +# variable is "1", the TCL extension library (libtclsqlite3.so) is built +# and installed. +# +HAVE_TCL = @HAVE_TCL@ + +# The suffix used on shared libraries. Ex: ".dll", ".so", ".dylib" +# +SHLIB_SUFFIX = @TCL_SHLIB_SUFFIX@ + +# The directory into which to store package information for + +# Some standard variables and programs +# +prefix = @prefix@ +exec_prefix = @exec_prefix@ +libdir = @libdir@ +INSTALL = @INSTALL@ +LIBTOOL = ./libtool +ALLOWRELEASE = @ALLOWRELEASE@ + +# libtool compile/link/install +LTCOMPILE = $(LIBTOOL) --mode=compile --tag=CC $(TCC) +LTLINK = $(LIBTOOL) --mode=link $(TCC) @LDFLAGS@ +LTINSTALL = $(LIBTOOL) --mode=install $(INSTALL) + +# nawk compatible awk. +NAWK = @AWK@ + +# You should not have to change anything below this line +############################################################################### +TCC += -DSQLITE_OMIT_LOAD_EXTENSION=1 + +# Object files for the SQLite library. 
+# +LIBOBJ = alter.lo analyze.lo attach.lo auth.lo btmutex.lo btree.lo build.lo \ + callback.lo complete.lo date.lo \ + delete.lo expr.lo func.lo hash.lo journal.lo insert.lo loadext.lo \ + main.lo malloc.lo mem1.lo mem2.lo mutex.lo \ + mutex_os2.lo mutex_unix.lo mutex_w32.lo \ + opcodes.lo os.lo os_unix.lo os_win.lo os_os2.lo \ + pager.lo parse.lo pragma.lo prepare.lo printf.lo random.lo \ + select.lo table.lo tokenize.lo trigger.lo update.lo \ + util.lo vacuum.lo \ + vdbe.lo vdbeapi.lo vdbeaux.lo vdbeblob.lo vdbefifo.lo vdbemem.lo \ + where.lo utf.lo legacy.lo vtab.lo + +# All of the source code files. +# +SRC = \ + $(TOP)/src/alter.c \ + $(TOP)/src/analyze.c \ + $(TOP)/src/attach.c \ + $(TOP)/src/auth.c \ + $(TOP)/src/btmutex.c \ + $(TOP)/src/btree.c \ + $(TOP)/src/btree.h \ + $(TOP)/src/build.c \ + $(TOP)/src/callback.c \ + $(TOP)/src/complete.c \ + $(TOP)/src/date.c \ + $(TOP)/src/delete.c \ + $(TOP)/src/expr.c \ + $(TOP)/src/func.c \ + $(TOP)/src/hash.c \ + $(TOP)/src/hash.h \ + $(TOP)/src/insert.c \ + $(TOP)/src/journal.c \ + $(TOP)/src/legacy.c \ + $(TOP)/src/loadext.c \ + $(TOP)/src/main.c \ + $(TOP)/src/malloc.c \ + $(TOP)/src/mem1.c \ + $(TOP)/src/mem2.c \ + $(TOP)/src/mutex.c \ + $(TOP)/src/mutex_os2.c \ + $(TOP)/src/mutex_unix.c \ + $(TOP)/src/mutex_w32.c \ + $(TOP)/src/os.c \ + $(TOP)/src/os_unix.c \ + $(TOP)/src/os_win.c \ + $(TOP)/src/os_os2.c \ + $(TOP)/src/pager.c \ + $(TOP)/src/pager.h \ + $(TOP)/src/parse.y \ + $(TOP)/src/pragma.c \ + $(TOP)/src/prepare.c \ + $(TOP)/src/printf.c \ + $(TOP)/src/random.c \ + $(TOP)/src/select.c \ + $(TOP)/src/shell.c \ + $(TOP)/src/sqlite.h.in \ + $(TOP)/src/sqliteInt.h \ + $(TOP)/src/table.c \ + $(TOP)/src/tclsqlite.c \ + $(TOP)/src/tokenize.c \ + $(TOP)/src/trigger.c \ + $(TOP)/src/utf.c \ + $(TOP)/src/update.c \ + $(TOP)/src/util.c \ + $(TOP)/src/vacuum.c \ + $(TOP)/src/vdbe.c \ + $(TOP)/src/vdbe.h \ + $(TOP)/src/vdbeapi.c \ + $(TOP)/src/vdbeaux.c \ + $(TOP)/src/vdbeblob.c \ + $(TOP)/src/vdbefifo.c \ + $(TOP)/src/vdbemem.c \ + $(TOP)/src/vdbeInt.h \ + $(TOP)/src/vtab.c \ + $(TOP)/src/where.c + +# Source code for extensions +# +SRC += \ + $(TOP)/ext/fts1/fts1.c \ + $(TOP)/ext/fts1/fts1.h \ + $(TOP)/ext/fts1/fts1_hash.c \ + $(TOP)/ext/fts1/fts1_hash.h \ + $(TOP)/ext/fts1/fts1_porter.c \ + $(TOP)/ext/fts1/fts1_tokenizer.h \ + $(TOP)/ext/fts1/fts1_tokenizer1.c + + +# Source code to the test files. +# +TESTSRC = \ + $(TOP)/src/attach.c \ + $(TOP)/src/btree.c \ + $(TOP)/src/build.c \ + $(TOP)/src/date.c \ + $(TOP)/src/expr.c \ + $(TOP)/src/func.c \ + $(TOP)/src/insert.c \ + $(TOP)/src/malloc.c \ + $(TOP)/src/os.c \ + $(TOP)/src/os_os2.c \ + $(TOP)/src/os_unix.c \ + $(TOP)/src/os_win.c \ + $(TOP)/src/pager.c \ + $(TOP)/src/pragma.c \ + $(TOP)/src/prepare.c \ + $(TOP)/src/printf.c \ + $(TOP)/src/select.c \ + $(TOP)/src/test1.c \ + $(TOP)/src/test2.c \ + $(TOP)/src/test3.c \ + $(TOP)/src/test4.c \ + $(TOP)/src/test5.c \ + $(TOP)/src/test6.c \ + $(TOP)/src/test7.c \ + $(TOP)/src/test8.c \ + $(TOP)/src/test9.c \ + $(TOP)/src/test_autoext.c \ + $(TOP)/src/test_async.c \ + $(TOP)/src/test_btree.c \ + $(TOP)/src/test_config.c \ + $(TOP)/src/test_hexio.c \ + $(TOP)/src/test_malloc.c \ + $(TOP)/src/test_md5.c \ + $(TOP)/src/test_schema.c \ + $(TOP)/src/test_server.c \ + $(TOP)/src/test_tclvar.c \ + $(TOP)/src/tokenize.c \ + $(TOP)/src/utf.c \ + $(TOP)/src/util.c \ + $(TOP)/src/vdbe.c \ + $(TOP)/src/vdbeapi.c \ + $(TOP)/src/vdbeaux.c \ + $(TOP)/src/vdbemem.c \ + $(TOP)/src/where.c \ + parse.c + +# Header files used by all library source files. 
+# +HDR = \ + sqlite3.h \ + $(TOP)/src/btree.h \ + $(TOP)/src/btreeInt.h \ + $(TOP)/src/hash.h \ + $(TOP)/src/sqliteLimit.h \ + $(TOP)/src/mutex.h \ + opcodes.h \ + $(TOP)/src/os.h \ + $(TOP)/src/os_common.h \ + $(TOP)/src/sqlite3ext.h \ + $(TOP)/src/sqliteInt.h \ + $(TOP)/src/vdbe.h \ + parse.h + +# Header files used by extensions +# +HDR += \ + $(TOP)/ext/fts1/fts1.h \ + $(TOP)/ext/fts1/fts1_hash.h \ + $(TOP)/ext/fts1/fts1_tokenizer.h + +# Header files used by the VDBE submodule +# +VDBEHDR = \ + $(HDR) \ + $(TOP)/src/vdbeInt.h + +# This is the default Makefile target. The objects listed here +# are what get build when you type just "make" with no arguments. +# +all: sqlite3.h libsqlite3.la sqlite3$(TEXE) $(HAVE_TCL:1=libtclsqlite3.la) + +Makefile: $(TOP)/Makefile.in + ./config.status + +# Generate the file "last_change" which contains the date of change +# of the most recently modified source code file +# +last_change: $(SRC) + cat $(SRC) | grep '$$Id: ' | sort -k 5 | tail -1 \ + | $(NAWK) '{print $$5,$$6}' >last_change + +libsqlite3.la: $(LIBOBJ) + $(LTLINK) -o libsqlite3.la $(LIBOBJ) $(LIBPTHREAD) \ + ${ALLOWRELEASE} -rpath $(libdir) -version-info "8:6:8" + +libtclsqlite3.la: tclsqlite.lo libsqlite3.la + $(LTLINK) -o libtclsqlite3.la tclsqlite.lo \ + $(LIBOBJ) @TCL_STUB_LIB_SPEC@ $(LIBPTHREAD) \ + -rpath $(libdir)/sqlite \ + -version-info "8:6:8" + +sqlite3$(TEXE): $(TOP)/src/shell.c libsqlite3.la sqlite3.h + $(LTLINK) $(READLINE_FLAGS) $(LIBPTHREAD) \ + -o $@ $(TOP)/src/shell.c libsqlite3.la \ + $(LIBREADLINE) $(TLIBS) + +# This target creates a directory named "tsrc" and fills it with +# copies of all of the C source code and header files needed to +# build on the target system. Some of the C source code and header +# files are automatically generated. This target takes care of +# all that automatic generation. +# +target_source: $(SRC) parse.c opcodes.c keywordhash.h $(VDBEHDR) + rm -rf tsrc + mkdir -p tsrc + cp $(SRC) $(VDBEHDR) tsrc + rm tsrc/sqlite.h.in tsrc/parse.y + cp parse.c opcodes.c keywordhash.h tsrc + +sqlite3.c: target_source $(TOP)/tool/mksqlite3c.tcl + tclsh $(TOP)/tool/mksqlite3c.tcl + +# Rules to build the LEMON compiler generator +# +lemon$(BEXE): $(TOP)/tool/lemon.c $(TOP)/tool/lempar.c + $(BCC) -o lemon$(BEXE) $(TOP)/tool/lemon.c + cp $(TOP)/tool/lempar.c . 
+ + +# Rules to build individual files +# +alter.lo: $(TOP)/src/alter.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/alter.c + +analyze.lo: $(TOP)/src/analyze.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/analyze.c + +attach.lo: $(TOP)/src/attach.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/attach.c + +auth.lo: $(TOP)/src/auth.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/auth.c + +btmutex.lo: $(TOP)/src/btmutex.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/btmutex.c + +btree.lo: $(TOP)/src/btree.c $(HDR) $(TOP)/src/pager.h + $(LTCOMPILE) -c $(TOP)/src/btree.c + +build.lo: $(TOP)/src/build.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/build.c + +callback.lo: $(TOP)/src/callback.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/callback.c + +complete.lo: $(TOP)/src/complete.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/complete.c + +date.lo: $(TOP)/src/date.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/date.c + +delete.lo: $(TOP)/src/delete.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/delete.c + +expr.lo: $(TOP)/src/expr.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/expr.c + +func.lo: $(TOP)/src/func.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/func.c + +hash.lo: $(TOP)/src/hash.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/hash.c + +insert.lo: $(TOP)/src/insert.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/insert.c + +journal.lo: $(TOP)/src/journal.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/journal.c + +legacy.lo: $(TOP)/src/legacy.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/legacy.c + +loadext.lo: $(TOP)/src/loadext.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/loadext.c + +main.lo: $(TOP)/src/main.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/main.c + +malloc.lo: $(TOP)/src/malloc.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/malloc.c + +mem1.lo: $(TOP)/src/mem1.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mem1.c + +mem2.lo: $(TOP)/src/mem2.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mem2.c + +mutex.lo: $(TOP)/src/mutex.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mutex.c + +mutex_os2.lo: $(TOP)/src/mutex_os2.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mutex_os2.c + +mutex_unix.lo: $(TOP)/src/mutex_unix.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mutex_unix.c + +mutex_w32.lo: $(TOP)/src/mutex_w32.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mutex_w32.c + +pager.lo: $(TOP)/src/pager.c $(HDR) $(TOP)/src/pager.h + $(LTCOMPILE) -c $(TOP)/src/pager.c + +opcodes.lo: opcodes.c + $(LTCOMPILE) -c opcodes.c + +opcodes.c: opcodes.h $(TOP)/mkopcodec.awk + sort -n -b -k 3 opcodes.h | $(NAWK) -f $(TOP)/mkopcodec.awk >opcodes.c + +opcodes.h: parse.h $(TOP)/src/vdbe.c $(TOP)/mkopcodeh.awk + cat parse.h $(TOP)/src/vdbe.c | $(NAWK) -f $(TOP)/mkopcodeh.awk >opcodes.h + +os.lo: $(TOP)/src/os.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/os.c + +os_unix.lo: $(TOP)/src/os_unix.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/os_unix.c + +os_win.lo: $(TOP)/src/os_win.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/os_win.c + +os_os2.lo: $(TOP)/src/os_os2.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/os_os2.c + +parse.lo: parse.c $(HDR) + $(LTCOMPILE) -c parse.c + +parse.h: parse.c + +parse.c: $(TOP)/src/parse.y lemon$(BEXE) $(TOP)/addopcodes.awk + cp $(TOP)/src/parse.y . 
+ ./lemon$(BEXE) $(OPTS) parse.y + mv parse.h parse.h.temp + $(NAWK) -f $(TOP)/addopcodes.awk parse.h.temp >parse.h + +pragma.lo: $(TOP)/src/pragma.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/pragma.c + +prepare.lo: $(TOP)/src/prepare.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/prepare.c + +printf.lo: $(TOP)/src/printf.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/printf.c + +random.lo: $(TOP)/src/random.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/random.c + +select.lo: $(TOP)/src/select.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/select.c + +sqlite3.h: $(TOP)/src/sqlite.h.in + sed -e s/--VERS--/$(RELEASE)/ $(TOP)/src/sqlite.h.in | \ + sed -e s/--VERSION-NUMBER--/$(VERSION_NUMBER)/ >sqlite3.h + +table.lo: $(TOP)/src/table.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/table.c + +tclsqlite.lo: $(TOP)/src/tclsqlite.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/tclsqlite.c + +tokenize.lo: $(TOP)/src/tokenize.c keywordhash.h $(HDR) + $(LTCOMPILE) -c $(TOP)/src/tokenize.c + +keywordhash.h: $(TOP)/tool/mkkeywordhash.c + $(BCC) -o mkkeywordhash$(BEXE) $(OPTS) $(TOP)/tool/mkkeywordhash.c + ./mkkeywordhash$(BEXE) >keywordhash.h + +trigger.lo: $(TOP)/src/trigger.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/trigger.c + +update.lo: $(TOP)/src/update.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/update.c + +utf.lo: $(TOP)/src/utf.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/utf.c + +util.lo: $(TOP)/src/util.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/util.c + +vacuum.lo: $(TOP)/src/vacuum.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/vacuum.c + +vdbe.lo: $(TOP)/src/vdbe.c $(VDBEHDR) + $(LTCOMPILE) -c $(TOP)/src/vdbe.c + +vdbeapi.lo: $(TOP)/src/vdbeapi.c $(VDBEHDR) + $(LTCOMPILE) -c $(TOP)/src/vdbeapi.c + +vdbeaux.lo: $(TOP)/src/vdbeaux.c $(VDBEHDR) + $(LTCOMPILE) -c $(TOP)/src/vdbeaux.c + +vdbeblob.lo: $(TOP)/src/vdbeblob.c $(VDBEHDR) + $(LTCOMPILE) -c $(TOP)/src/vdbeblob.c + +vdbefifo.lo: $(TOP)/src/vdbefifo.c $(VDBEHDR) + $(LTCOMPILE) -c $(TOP)/src/vdbefifo.c + +vdbemem.lo: $(TOP)/src/vdbemem.c $(VDBEHDR) + $(LTCOMPILE) -c $(TOP)/src/vdbemem.c + +vtab.lo: $(TOP)/src/vtab.c $(VDBEHDR) + $(LTCOMPILE) -c $(TOP)/src/vtab.c + +where.lo: $(TOP)/src/where.c $(HDR) + $(LTCOMPILE) -c $(TOP)/src/where.c + +tclsqlite-shell.lo: $(TOP)/src/tclsqlite.c $(HDR) + $(LTCOMPILE) -DTCLSH=1 -o $@ -c $(TOP)/src/tclsqlite.c + +tclsqlite-stubs.lo: $(TOP)/src/tclsqlite.c $(HDR) + $(LTCOMPILE) -DTCL_USE_STUBS=1 -o $@ -c $(TOP)/src/tclsqlite.c + +tclsqlite3: tclsqlite-shell.lo libsqlite3.la + $(LTLINK) -o tclsqlite3 tclsqlite-shell.lo \ + libsqlite3.la $(LIBTCL) + +testfixture$(TEXE): $(TOP)/src/tclsqlite.c libsqlite3.la $(TESTSRC) + $(LTLINK) -DTCLSH=1 -DSQLITE_TEST=1 -DSQLITE_CRASH_TEST=1 \ + -DSQLITE_NO_SYNC=1 $(TEMP_STORE) \ + -o testfixture $(TESTSRC) $(TOP)/src/tclsqlite.c \ + libsqlite3.la $(LIBTCL) + + +fulltest: testfixture$(TEXE) sqlite3$(TEXE) + ./testfixture $(TOP)/test/all.test + +test: testfixture$(TEXE) sqlite3$(TEXE) + ./testfixture $(TOP)/test/quick.test + +sqlite3_analyzer$(TEXE): $(TOP)/src/tclsqlite.c libtclsqlite3.la \ + $(TESTSRC) $(TOP)/tool/spaceanal.tcl + sed \ + -e '/^#/d' \ + -e 's,\\,\\\\,g' \ + -e 's,",\\",g' \ + -e 's,^,",' \ + -e 's,$$,\\n",' \ + $(TOP)/tool/spaceanal.tcl >spaceanal_tcl.h + $(LTLINK) -DTCLSH=2 -DSQLITE_TEST=1 $(TEMP_STORE)\ + -o sqlite3_analyzer$(EXE) $(TESTSRC) $(TOP)/src/tclsqlite.c \ + libtclsqlite3.la $(LIBTCL) + +# Rules used to build documentation +# +arch.html: $(TOP)/www/arch.tcl + tclsh $(TOP)/www/arch.tcl >arch.html + +arch2.gif: $(TOP)/www/arch2.gif + cp $(TOP)/www/arch2.gif . 
+ +autoinc.html: $(TOP)/www/autoinc.tcl + tclsh $(TOP)/www/autoinc.tcl >autoinc.html + +c_interface.html: $(TOP)/www/c_interface.tcl + tclsh $(TOP)/www/c_interface.tcl >c_interface.html + +capi3.html: $(TOP)/www/capi3.tcl + tclsh $(TOP)/www/capi3.tcl >capi3.html + +capi3ref.html: $(TOP)/www/mkapidoc.tcl sqlite3.h + tclsh $(TOP)/www/mkapidoc.tcl capi3ref.html + +changes.html: $(TOP)/www/changes.tcl + tclsh $(TOP)/www/changes.tcl >changes.html + +compile.html: $(TOP)/www/compile.tcl + tclsh $(TOP)/www/compile.tcl >compile.html + +copyright.html: $(TOP)/www/copyright.tcl + tclsh $(TOP)/www/copyright.tcl >copyright.html + +copyright-release.html: $(TOP)/www/copyright-release.html + cp $(TOP)/www/copyright-release.html . + +copyright-release.pdf: $(TOP)/www/copyright-release.pdf + cp $(TOP)/www/copyright-release.pdf . + +common.tcl: $(TOP)/www/common.tcl + cp $(TOP)/www/common.tcl . + +conflict.html: $(TOP)/www/conflict.tcl + tclsh $(TOP)/www/conflict.tcl >conflict.html + +datatypes.html: $(TOP)/www/datatypes.tcl + tclsh $(TOP)/www/datatypes.tcl >datatypes.html + +datatype3.html: $(TOP)/www/datatype3.tcl + tclsh $(TOP)/www/datatype3.tcl >datatype3.html + +docs.html: $(TOP)/www/docs.tcl + tclsh $(TOP)/www/docs.tcl >docs.html + +download.html: $(TOP)/www/download.tcl + mkdir -p doc + tclsh $(TOP)/www/download.tcl >download.html + +faq.html: $(TOP)/www/faq.tcl + tclsh $(TOP)/www/faq.tcl >faq.html + +fileformat.html: $(TOP)/www/fileformat.tcl + tclsh $(TOP)/www/fileformat.tcl >fileformat.html + +formatchng.html: $(TOP)/www/formatchng.tcl + tclsh $(TOP)/www/formatchng.tcl >formatchng.html + +index.html: $(TOP)/www/index.tcl last_change + tclsh $(TOP)/www/index.tcl >index.html + +limits.html: $(TOP)/www/limits.tcl last_change + tclsh $(TOP)/www/limits.tcl >limits.html + +lang.html: $(TOP)/www/lang.tcl + tclsh $(TOP)/www/lang.tcl >lang.html + +pragma.html: $(TOP)/www/pragma.tcl + tclsh $(TOP)/www/pragma.tcl >pragma.html + +lockingv3.html: $(TOP)/www/lockingv3.tcl + tclsh $(TOP)/www/lockingv3.tcl >lockingv3.html + +oldnews.html: $(TOP)/www/oldnews.tcl + tclsh $(TOP)/www/oldnews.tcl >oldnews.html + +omitted.html: $(TOP)/www/omitted.tcl + tclsh $(TOP)/www/omitted.tcl >omitted.html + +opcode.html: $(TOP)/www/opcode.tcl $(TOP)/src/vdbe.c + tclsh $(TOP)/www/opcode.tcl $(TOP)/src/vdbe.c >opcode.html + +mingw.html: $(TOP)/www/mingw.tcl + tclsh $(TOP)/www/mingw.tcl >mingw.html + +nulls.html: $(TOP)/www/nulls.tcl + tclsh $(TOP)/www/nulls.tcl >nulls.html + +quickstart.html: $(TOP)/www/quickstart.tcl + tclsh $(TOP)/www/quickstart.tcl >quickstart.html + +speed.html: $(TOP)/www/speed.tcl + tclsh $(TOP)/www/speed.tcl >speed.html + +sqlite.gif: $(TOP)/art/SQLite.gif + cp $(TOP)/art/SQLite.gif sqlite.gif + +sqlite.html: $(TOP)/www/sqlite.tcl + tclsh $(TOP)/www/sqlite.tcl >sqlite.html + +support.html: $(TOP)/www/support.tcl + tclsh $(TOP)/www/support.tcl >support.html + +tclsqlite.html: $(TOP)/www/tclsqlite.tcl + tclsh $(TOP)/www/tclsqlite.tcl >tclsqlite.html + +vdbe.html: $(TOP)/www/vdbe.tcl + tclsh $(TOP)/www/vdbe.tcl >vdbe.html + +version3.html: $(TOP)/www/version3.tcl + tclsh $(TOP)/www/version3.tcl >version3.html + + +# Files to be published on the website. 
+# +DOC = \ + arch.html \ + arch2.gif \ + autoinc.html \ + c_interface.html \ + capi3.html \ + capi3ref.html \ + changes.html \ + compile.html \ + copyright.html \ + copyright-release.html \ + copyright-release.pdf \ + conflict.html \ + datatypes.html \ + datatype3.html \ + docs.html \ + download.html \ + faq.html \ + fileformat.html \ + formatchng.html \ + index.html \ + lang.html \ + limits.html \ + lockingv3.html \ + mingw.html \ + nulls.html \ + oldnews.html \ + omitted.html \ + opcode.html \ + pragma.html \ + quickstart.html \ + speed.html \ + sqlite.gif \ + sqlite.html \ + support.html \ + tclsqlite.html \ + vdbe.html \ + version3.html + +doc: common.tcl $(DOC) + mkdir -p doc + mv $(DOC) doc + +install: sqlite3 libsqlite3.la sqlite3.h ${HAVE_TCL:1=tcl_install} + $(INSTALL) -d $(DESTDIR)$(libdir) + $(LTINSTALL) libsqlite3.la $(DESTDIR)$(libdir) + $(INSTALL) -d $(DESTDIR)$(exec_prefix)/bin + $(LTINSTALL) sqlite3 $(DESTDIR)$(exec_prefix)/bin + $(INSTALL) -d $(DESTDIR)$(prefix)/include + $(INSTALL) -m 0644 sqlite3.h $(DESTDIR)$(prefix)/include + $(INSTALL) -m 0644 $(TOP)/src/sqlite3ext.h $(DESTDIR)$(prefix)/include + $(INSTALL) -d $(DESTDIR)$(libdir)/pkgconfig; + $(INSTALL) -m 0644 sqlite3.pc $(DESTDIR)$(libdir)/pkgconfig; + +tcl_install: libtclsqlite3.la + tclsh $(TOP)/tclinstaller.tcl $(VERSION) + +clean: + rm -f *.lo *.la *.o sqlite3$(TEXE) libsqlite3.la + rm -f sqlite3.h opcodes.* + rm -rf .libs .deps + rm -f lemon$(BEXE) lempar.c parse.* sqlite*.tar.gz + rm -f mkkeywordhash$(BEXE) keywordhash.h + rm -f $(PUBLISH) + rm -f *.da *.bb *.bbg gmon.out + rm -f testfixture$(TEXE) test.db + rm -rf doc + rm -f common.tcl + rm -f sqlite3.dll sqlite3.lib sqlite3.def + +distclean: clean + rm -f config.log config.status libtool Makefile config.h sqlite3.pc + +# +# Windows section +# +dll: sqlite3.dll + +REAL_LIBOBJ = $(LIBOBJ:%.lo=.libs/%.o) + +$(REAL_LIBOBJ): $(LIBOBJ) + +sqlite3.def: $(REAL_LIBOBJ) + echo 'EXPORTS' >sqlite3.def + nm $(REAL_LIBOBJ) | grep ' T ' | grep ' _sqlite3_' \ + | sed 's/^.* _//' >>sqlite3.def + +sqlite3.dll: $(REAL_LIBOBJ) sqlite3.def + $(TCC) -shared -o sqlite3.dll sqlite3.def \ + -Wl,"--strip-all" $(REAL_LIBOBJ) diff --git a/libraries/sqlite/unix/sqlite-3.5.1/Makefile.linux-gcc b/libraries/sqlite/unix/sqlite-3.5.1/Makefile.linux-gcc new file mode 100644 index 0000000000..b4d769914e --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/Makefile.linux-gcc @@ -0,0 +1,124 @@ +#!/usr/make +# +# Makefile for SQLITE +# +# This is a template makefile for SQLite. Most people prefer to +# use the autoconf generated "configure" script to generate the +# makefile automatically. But that does not work for everybody +# and in every situation. If you are having problems with the +# "configure" script, you might want to try this makefile as an +# alternative. Create a copy of this file, edit the parameters +# below and type "make". +# + +#### The toplevel directory of the source tree. This is the directory +# that contains this "Makefile.in" and the "configure.in" script. +# +TOP = ../sqlite + +#### C Compiler and options for use in building executables that +# will run on the platform that is doing the build. +# +BCC = gcc -g -O2 +#BCC = /opt/ancic/bin/c89 -0 + +#### If the target operating system supports the "usleep()" system +# call, then define the HAVE_USLEEP macro for all C modules. 
+# +#USLEEP = +USLEEP = -DHAVE_USLEEP=1 + +#### If you want the SQLite library to be safe for use within a +# multi-threaded program, then define the following macro +# appropriately: +# +#THREADSAFE = -DTHREADSAFE=1 +THREADSAFE = -DTHREADSAFE=0 + +#### Specify any extra linker options needed to make the library +# thread safe +# +#THREADLIB = -lpthread +THREADLIB = + +#### Specify any extra libraries needed to access required functions. +# +#TLIBS = -lrt # fdatasync on Solaris 8 +TLIBS = + +#### Leave SQLITE_DEBUG undefined for maximum speed. Use SQLITE_DEBUG=1 +# to check for memory leaks. Use SQLITE_DEBUG=2 to print a log of all +# malloc()s and free()s in order to track down memory leaks. +# +# SQLite uses some expensive assert() statements in the inner loop. +# You can make the library go almost twice as fast if you compile +# with -DNDEBUG=1 +# +#OPTS = -DSQLITE_DEBUG=2 +#OPTS = -DSQLITE_DEBUG=1 +#OPTS = +OPTS = -DNDEBUG=1 +OPTS += -DHAVE_FDATASYNC=1 + +#### The suffix to add to executable files. ".exe" for windows. +# Nothing for unix. +# +#EXE = .exe +EXE = + +#### C Compile and options for use in building executables that +# will run on the target platform. This is usually the same +# as BCC, unless you are cross-compiling. +# +TCC = gcc -O6 +#TCC = gcc -g -O0 -Wall +#TCC = gcc -g -O0 -Wall -fprofile-arcs -ftest-coverage +#TCC = /opt/mingw/bin/i386-mingw32-gcc -O6 +#TCC = /opt/ansic/bin/c89 -O +z -Wl,-a,archive + +#### Tools used to build a static library. +# +AR = ar cr +#AR = /opt/mingw/bin/i386-mingw32-ar cr +RANLIB = ranlib +#RANLIB = /opt/mingw/bin/i386-mingw32-ranlib + +MKSHLIB = gcc -shared +SO = so +SHPREFIX = lib +# SO = dll +# SHPREFIX = + +#### Extra compiler options needed for programs that use the TCL library. +# +#TCL_FLAGS = +#TCL_FLAGS = -DSTATIC_BUILD=1 +TCL_FLAGS = -I/home/drh/tcltk/8.4linux +#TCL_FLAGS = -I/home/drh/tcltk/8.4win -DSTATIC_BUILD=1 +#TCL_FLAGS = -I/home/drh/tcltk/8.3hpux + +#### Linker options needed to link against the TCL library. +# +#LIBTCL = -ltcl -lm -ldl +LIBTCL = /home/drh/tcltk/8.4linux/libtcl8.4g.a -lm -ldl +#LIBTCL = /home/drh/tcltk/8.4win/libtcl84s.a -lmsvcrt +#LIBTCL = /home/drh/tcltk/8.3hpux/libtcl8.3.a -ldld -lm -lc + +#### Compiler options needed for programs that use the readline() library. +# +READLINE_FLAGS = +#READLINE_FLAGS = -DHAVE_READLINE=1 -I/usr/include/readline + +#### Linker options needed by programs using readline() must link against. +# +LIBREADLINE = +#LIBREADLINE = -static -lreadline -ltermcap + +#### Which "awk" program provides nawk compatibilty +# +# NAWK = nawk +NAWK = awk + +# You should not have to change anything below this line +############################################################################### +include $(TOP)/main.mk diff --git a/libraries/sqlite/unix/sqlite-3.5.1/README b/libraries/sqlite/unix/sqlite-3.5.1/README new file mode 100644 index 0000000000..6e4f392054 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/README @@ -0,0 +1,35 @@ +This directory contains source code to + + SQLite: An Embeddable SQL Database Engine + +To compile the project, first create a directory in which to place +the build products. It is recommended, but not required, that the +build directory be separate from the source directory. Cd into the +build directory and then from the build directory run the configure +script found at the root of the source tree. Then run "make". 
+ +For example: + + tar xzf sqlite.tar.gz ;# Unpack the source tree into "sqlite" + mkdir bld ;# Build will occur in a sibling directory + cd bld ;# Change to the build directory + ../sqlite/configure ;# Run the configure script + make ;# Run the makefile. + make install ;# (Optional) Install the build products + +The configure script uses autoconf 2.50 and libtool. If the configure +script does not work out for you, there is a generic makefile named +"Makefile.linux-gcc" in the top directory of the source tree that you +can copy and edit to suite your needs. Comments on the generic makefile +show what changes are needed. + +The linux binaries on the website are created using the generic makefile, +not the configure script. The configure script is unmaintained. (You +can volunteer to take over maintenance of the configure script, if you want!) +The windows binaries on the website are created using MinGW32 configured +as a cross-compiler running under Linux. For details, see the ./publish.sh +script at the top-level of the source tree. + +Contacts: + + http://www.sqlite.org/ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/VERSION b/libraries/sqlite/unix/sqlite-3.5.1/VERSION new file mode 100644 index 0000000000..d5c0c99142 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/VERSION @@ -0,0 +1 @@ +3.5.1 diff --git a/libraries/sqlite/unix/sqlite-3.5.1/aclocal.m4 b/libraries/sqlite/unix/sqlite-3.5.1/aclocal.m4 new file mode 100644 index 0000000000..27b6c66b39 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/aclocal.m4 @@ -0,0 +1,6470 @@ +# generated automatically by aclocal 1.10 -*- Autoconf -*- + +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, +# 2005, 2006 Free Software Foundation, Inc. +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*- + +# serial 48 AC_PROG_LIBTOOL + + +# AC_PROVIDE_IFELSE(MACRO-NAME, IF-PROVIDED, IF-NOT-PROVIDED) +# ----------------------------------------------------------- +# If this macro is not defined by Autoconf, define it here. +m4_ifdef([AC_PROVIDE_IFELSE], + [], + [m4_define([AC_PROVIDE_IFELSE], + [m4_ifdef([AC_PROVIDE_$1], + [$2], [$3])])]) + + +# AC_PROG_LIBTOOL +# --------------- +AC_DEFUN([AC_PROG_LIBTOOL], +[AC_REQUIRE([_AC_PROG_LIBTOOL])dnl +dnl If AC_PROG_CXX has already been expanded, run AC_LIBTOOL_CXX +dnl immediately, otherwise, hook it in at the end of AC_PROG_CXX. + AC_PROVIDE_IFELSE([AC_PROG_CXX], + [AC_LIBTOOL_CXX], + [define([AC_PROG_CXX], defn([AC_PROG_CXX])[AC_LIBTOOL_CXX + ])]) +dnl And a similar setup for Fortran 77 support + AC_PROVIDE_IFELSE([AC_PROG_F77], + [AC_LIBTOOL_F77], + [define([AC_PROG_F77], defn([AC_PROG_F77])[AC_LIBTOOL_F77 +])]) + +dnl Quote A][M_PROG_GCJ so that aclocal doesn't bring it in needlessly. +dnl If either AC_PROG_GCJ or A][M_PROG_GCJ have already been expanded, run +dnl AC_LIBTOOL_GCJ immediately, otherwise, hook it in at the end of both. 
+ AC_PROVIDE_IFELSE([AC_PROG_GCJ], + [AC_LIBTOOL_GCJ], + [AC_PROVIDE_IFELSE([A][M_PROG_GCJ], + [AC_LIBTOOL_GCJ], + [AC_PROVIDE_IFELSE([LT_AC_PROG_GCJ], + [AC_LIBTOOL_GCJ], + [ifdef([AC_PROG_GCJ], + [define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[AC_LIBTOOL_GCJ])]) + ifdef([A][M_PROG_GCJ], + [define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[AC_LIBTOOL_GCJ])]) + ifdef([LT_AC_PROG_GCJ], + [define([LT_AC_PROG_GCJ], + defn([LT_AC_PROG_GCJ])[AC_LIBTOOL_GCJ])])])]) +])])# AC_PROG_LIBTOOL + + +# _AC_PROG_LIBTOOL +# ---------------- +AC_DEFUN([_AC_PROG_LIBTOOL], +[AC_REQUIRE([AC_LIBTOOL_SETUP])dnl +AC_BEFORE([$0],[AC_LIBTOOL_CXX])dnl +AC_BEFORE([$0],[AC_LIBTOOL_F77])dnl +AC_BEFORE([$0],[AC_LIBTOOL_GCJ])dnl + +# This can be used to rebuild libtool when needed +LIBTOOL_DEPS="$ac_aux_dir/ltmain.sh" + +# Always use our own libtool. +LIBTOOL='$(SHELL) $(top_builddir)/libtool' +AC_SUBST(LIBTOOL)dnl + +# Prevent multiple expansion +define([AC_PROG_LIBTOOL], []) +])# _AC_PROG_LIBTOOL + + +# AC_LIBTOOL_SETUP +# ---------------- +AC_DEFUN([AC_LIBTOOL_SETUP], +[AC_PREREQ(2.50)dnl +AC_REQUIRE([AC_ENABLE_SHARED])dnl +AC_REQUIRE([AC_ENABLE_STATIC])dnl +AC_REQUIRE([AC_ENABLE_FAST_INSTALL])dnl +AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_CANONICAL_BUILD])dnl +AC_REQUIRE([AC_PROG_CC])dnl +AC_REQUIRE([AC_PROG_LD])dnl +AC_REQUIRE([AC_PROG_LD_RELOAD_FLAG])dnl +AC_REQUIRE([AC_PROG_NM])dnl + +AC_REQUIRE([AC_PROG_LN_S])dnl +AC_REQUIRE([AC_DEPLIBS_CHECK_METHOD])dnl +# Autoconf 2.13's AC_OBJEXT and AC_EXEEXT macros only works for C compilers! +AC_REQUIRE([AC_OBJEXT])dnl +AC_REQUIRE([AC_EXEEXT])dnl +dnl + +AC_LIBTOOL_SYS_MAX_CMD_LEN +AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE +AC_LIBTOOL_OBJDIR + +AC_REQUIRE([_LT_AC_SYS_COMPILER])dnl +_LT_AC_PROG_ECHO_BACKSLASH + +case $host_os in +aix3*) + # AIX sometimes has problems with the GCC collect2 program. For some + # reason, if we set the COLLECT_NAMES environment variable, the problems + # vanish in a puff of smoke. + if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES + fi + ;; +esac + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +Xsed='sed -e 1s/^X//' +[sed_quote_subst='s/\([\\"\\`$\\\\]\)/\\\1/g'] + +# Same as above, but do not quote variable references. +[double_quote_subst='s/\([\\"\\`\\\\]\)/\\\1/g'] + +# Sed substitution to delay expansion of an escaped shell variable in a +# double_quote_subst'ed string. +delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' + +# Sed substitution to avoid accidental globbing in evaled expressions +no_glob_subst='s/\*/\\\*/g' + +# Constants: +rm="rm -f" + +# Global variables: +default_ofile=libtool +can_build_shared=yes + +# All known linkers require a `.a' archive for static linking (except MSVC, +# which needs '.lib'). 
+libext=a +ltmain="$ac_aux_dir/ltmain.sh" +ofile="$default_ofile" +with_gnu_ld="$lt_cv_prog_gnu_ld" + +AC_CHECK_TOOL(AR, ar, false) +AC_CHECK_TOOL(RANLIB, ranlib, :) +AC_CHECK_TOOL(STRIP, strip, :) + +old_CC="$CC" +old_CFLAGS="$CFLAGS" + +# Set sane defaults for various variables +test -z "$AR" && AR=ar +test -z "$AR_FLAGS" && AR_FLAGS=cru +test -z "$AS" && AS=as +test -z "$CC" && CC=cc +test -z "$LTCC" && LTCC=$CC +test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS +test -z "$DLLTOOL" && DLLTOOL=dlltool +test -z "$LD" && LD=ld +test -z "$LN_S" && LN_S="ln -s" +test -z "$MAGIC_CMD" && MAGIC_CMD=file +test -z "$NM" && NM=nm +test -z "$SED" && SED=sed +test -z "$OBJDUMP" && OBJDUMP=objdump +test -z "$RANLIB" && RANLIB=: +test -z "$STRIP" && STRIP=: +test -z "$ac_objext" && ac_objext=o + +# Determine commands to create old-style static archives. +old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs$old_deplibs' +old_postinstall_cmds='chmod 644 $oldlib' +old_postuninstall_cmds= + +if test -n "$RANLIB"; then + case $host_os in + openbsd*) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$oldlib" + ;; + *) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$oldlib" + ;; + esac + old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib" +fi + +_LT_CC_BASENAME([$compiler]) + +# Only perform the check for file, if the check method requires it +case $deplibs_check_method in +file_magic*) + if test "$file_magic_cmd" = '$MAGIC_CMD'; then + AC_PATH_MAGIC + fi + ;; +esac + +AC_PROVIDE_IFELSE([AC_LIBTOOL_DLOPEN], enable_dlopen=yes, enable_dlopen=no) +AC_PROVIDE_IFELSE([AC_LIBTOOL_WIN32_DLL], +enable_win32_dll=yes, enable_win32_dll=no) + +AC_ARG_ENABLE([libtool-lock], + [AC_HELP_STRING([--disable-libtool-lock], + [avoid locking (might break parallel builds)])]) +test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes + +AC_ARG_WITH([pic], + [AC_HELP_STRING([--with-pic], + [try to use only PIC/non-PIC objects @<:@default=use both@:>@])], + [pic_mode="$withval"], + [pic_mode=default]) +test -z "$pic_mode" && pic_mode=default + +# Check if we have a version mismatch between libtool.m4 and ltmain.sh. +# +# Note: This should be in AC_LIBTOOL_SETUP, _after_ $ltmain have been defined. +# We also should do it _before_ AC_LIBTOOL_LANG_C_CONFIG that actually +# calls AC_LIBTOOL_CONFIG and creates libtool. +# +_LT_VERSION_CHECK + +# Use C for the default configuration in the libtool script +tagname= +AC_LIBTOOL_LANG_C_CONFIG +_LT_AC_TAGCONFIG +])# AC_LIBTOOL_SETUP + + +# _LT_VERSION_CHECK +# ----------------- +AC_DEFUN([_LT_VERSION_CHECK], +[AC_MSG_CHECKING([for correct ltmain.sh version]) +if test "x$ltmain" = "x" ; then + AC_MSG_RESULT(no) + AC_MSG_ERROR([ + +*** @<:@Gentoo@:>@ sanity check failed! *** +*** \$ltmain is not defined, please check the patch for consistency! *** +]) +fi +gentoo_lt_version="1.5.22" +gentoo_ltmain_version=`sed -n '/^[[ ]]*VERSION=/{s/^[[ ]]*VERSION=//;p;q;}' "$ltmain"` +if test "x$gentoo_lt_version" != "x$gentoo_ltmain_version" ; then + AC_MSG_RESULT(no) + AC_MSG_ERROR([ + +*** @<:@Gentoo@:>@ sanity check failed! *** +*** libtool.m4 and ltmain.sh have a version mismatch! *** +*** (libtool.m4 = $gentoo_lt_version, ltmain.sh = $gentoo_ltmain_version) *** + +Please run: + + libtoolize --copy --force + +if appropriate, please contact the maintainer of this +package (or your distribution) for help. 
+]) +else + AC_MSG_RESULT(yes) +fi +])# _LT_VERSION_CHECK + + +# _LT_AC_SYS_COMPILER +# ------------------- +AC_DEFUN([_LT_AC_SYS_COMPILER], +[AC_REQUIRE([AC_PROG_CC])dnl + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC +])# _LT_AC_SYS_COMPILER + + +# _LT_CC_BASENAME(CC) +# ------------------- +# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. +AC_DEFUN([_LT_CC_BASENAME], +[for cc_temp in $1""; do + case $cc_temp in + compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;; + distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` +]) + + +# _LT_COMPILER_BOILERPLATE +# ------------------------ +# Check for compiler boilerplate output or warnings with +# the simple compiler test code. +AC_DEFUN([_LT_COMPILER_BOILERPLATE], +[ac_outfile=conftest.$ac_objext +printf "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$rm conftest* +])# _LT_COMPILER_BOILERPLATE + + +# _LT_LINKER_BOILERPLATE +# ---------------------- +# Check for linker boilerplate output or warnings with +# the simple link test code. +AC_DEFUN([_LT_LINKER_BOILERPLATE], +[ac_outfile=conftest.$ac_objext +printf "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$rm conftest* +])# _LT_LINKER_BOILERPLATE + + +# _LT_AC_SYS_LIBPATH_AIX +# ---------------------- +# Links a minimal program and checks the executable +# for the system default hardcoded library path. In most cases, +# this is /usr/lib:/lib, but when the MPI compilers are used +# the location of the communication and MPI libs are included too. +# If we don't find anything, use the default library path according +# to the aix ld manual. +AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX], +[AC_LINK_IFELSE(AC_LANG_PROGRAM,[ +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. +if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi],[]) +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi +])# _LT_AC_SYS_LIBPATH_AIX + + +# _LT_AC_SHELL_INIT(ARG) +# ---------------------- +AC_DEFUN([_LT_AC_SHELL_INIT], +[ifdef([AC_DIVERSION_NOTICE], + [AC_DIVERT_PUSH(AC_DIVERSION_NOTICE)], + [AC_DIVERT_PUSH(NOTICE)]) +$1 +AC_DIVERT_POP +])# _LT_AC_SHELL_INIT + + +# _LT_AC_PROG_ECHO_BACKSLASH +# -------------------------- +# Add some code to the start of the generated configure script which +# will find an echo command which doesn't interpret backslashes. +AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH], +[_LT_AC_SHELL_INIT([ +# Check that we are running under the correct shell. +SHELL=${CONFIG_SHELL-/bin/sh} + +case X$ECHO in +X*--fallback-echo) + # Remove one level of quotation (which was required for Make). + ECHO=`echo "$ECHO" | sed 's,\\\\\[$]\\[$]0,'[$]0','` + ;; +esac + +echo=${ECHO-echo} +if test "X[$]1" = X--no-reexec; then + # Discard the --no-reexec flag, and continue. 
+ shift
+elif test "X[$]1" = X--fallback-echo; then
+ # Avoid inline document here, it may be left over
+ :
+elif test "X`($echo '\t') 2>/dev/null`" = 'X\t' ; then
+ # Yippee, $echo works!
+ :
+else
+ # Restart under the correct shell.
+ exec $SHELL "[$]0" --no-reexec ${1+"[$]@"}
+fi
+
+if test "X[$]1" = X--fallback-echo; then
+ # used as fallback echo
+ shift
+ cat <<EOF
+[$]*
+EOF
+ exit 0
+fi
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+if test -z "$ECHO"; then
+if test "X${echo_test_string+set}" != Xset; then
+# find a string as large as possible, as long as the shell can cope with it
+ for cmd in 'sed 50q "[$]0"' 'sed 20q "[$]0"' 'sed 10q "[$]0"' 'sed 2q "[$]0"' 'echo test'; do
+ # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ...
+ if (echo_test_string=`eval $cmd`) 2>/dev/null &&
+ echo_test_string=`eval $cmd` &&
+ (test "X$echo_test_string" = "X$echo_test_string") 2>/dev/null
+ then
+ break
+ fi
+ done
+fi
+
+if test "X`($echo '\t') 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ :
+else
+ # The Solaris, AIX, and Digital Unix default echo programs unquote
+ # backslashes. This makes it impossible to quote backslashes using
+ # echo "$something" | sed 's/\\/\\\\/g'
+ #
+ # So, first we look for a working echo in the user's PATH.
+
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for dir in $PATH /usr/ucb; do
+ IFS="$lt_save_ifs"
+ if (test -f $dir/echo || test -f $dir/echo$ac_exeext) &&
+ test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ echo="$dir/echo"
+ break
+ fi
+ done
+ IFS="$lt_save_ifs"
+
+ if test "X$echo" = Xecho; then
+ # We didn't find a better echo, so look for alternatives.
+ if test "X`(print -r '\t') 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`(print -r "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ # This shell has a builtin print -r that does the trick.
+ echo='print -r'
+ elif (test -f /bin/ksh || test -f /bin/ksh$ac_exeext) &&
+ test "X$CONFIG_SHELL" != X/bin/ksh; then
+ # If we have ksh, try running configure again with it.
+ ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh}
+ export ORIGINAL_CONFIG_SHELL
+ CONFIG_SHELL=/bin/ksh
+ export CONFIG_SHELL
+ exec $CONFIG_SHELL "[$]0" --no-reexec ${1+"[$]@"}
+ else
+ # Try using printf.
+ echo='printf %s\n'
+ if test "X`($echo '\t') 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ # Cool, printf works
+ :
+ elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` &&
+ test "X$echo_testing_string" = 'X\t' &&
+ echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL
+ export CONFIG_SHELL
+ SHELL="$CONFIG_SHELL"
+ export SHELL
+ echo="$CONFIG_SHELL [$]0 --fallback-echo"
+ elif echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` &&
+ test "X$echo_testing_string" = 'X\t' &&
+ echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ echo="$CONFIG_SHELL [$]0 --fallback-echo"
+ else
+ # maybe with a smaller string...
+ prev=: + + for cmd in 'echo test' 'sed 2q "[$]0"' 'sed 10q "[$]0"' 'sed 20q "[$]0"' 'sed 50q "[$]0"'; do + if (test "X$echo_test_string" = "X`eval $cmd`") 2>/dev/null + then + break + fi + prev="$cmd" + done + + if test "$prev" != 'sed 50q "[$]0"'; then + echo_test_string=`eval $prev` + export echo_test_string + exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "[$]0" ${1+"[$]@"} + else + # Oops. We lost completely, so just stick with echo. + echo=echo + fi + fi + fi + fi +fi +fi + +# Copy echo and quote the copy suitably for passing to libtool from +# the Makefile, instead of quoting the original, which is used later. +ECHO=$echo +if test "X$ECHO" = "X$CONFIG_SHELL [$]0 --fallback-echo"; then + ECHO="$CONFIG_SHELL \\\$\[$]0 --fallback-echo" +fi + +AC_SUBST(ECHO) +])])# _LT_AC_PROG_ECHO_BACKSLASH + + +# _LT_AC_LOCK +# ----------- +AC_DEFUN([_LT_AC_LOCK], +[AC_ARG_ENABLE([libtool-lock], + [AC_HELP_STRING([--disable-libtool-lock], + [avoid locking (might break parallel builds)])]) +test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes + +# Some flags need to be propagated to the compiler or linker for good +# libtool support. +case $host in +ia64-*-hpux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.$ac_objext` in + *ELF-32*) + HPUX_IA64_MODE="32" + ;; + *ELF-64*) + HPUX_IA64_MODE="64" + ;; + esac + fi + rm -rf conftest* + ;; +*-*-irix6*) + # Find out which ABI we are using. + echo '[#]line __oline__ "configure"' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + if test "$lt_cv_prog_gnu_ld" = yes; then + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -melf32bsmip" + ;; + *N32*) + LD="${LD-ld} -melf32bmipn32" + ;; + *64-bit*) + LD="${LD-ld} -melf64bmip" + ;; + esac + else + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -32" + ;; + *N32*) + LD="${LD-ld} -n32" + ;; + *64-bit*) + LD="${LD-ld} -64" + ;; + esac + fi + fi + rm -rf conftest* + ;; + +x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*|s390*-*linux*|sparc*-*linux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.o` in + *32-bit*) + case $host in + x86_64-*linux*) + LD="${LD-ld} -m elf_i386" + ;; + ppc64-*linux*|powerpc64-*linux*) + LD="${LD-ld} -m elf32ppclinux" + ;; + s390x-*linux*) + LD="${LD-ld} -m elf_s390" + ;; + sparc64-*linux*) + LD="${LD-ld} -m elf32_sparc" + ;; + esac + ;; + *64-bit*) + case $host in + x86_64-*linux*) + LD="${LD-ld} -m elf_x86_64" + ;; + ppc*-*linux*|powerpc*-*linux*) + LD="${LD-ld} -m elf64ppc" + ;; + s390*-*linux*) + LD="${LD-ld} -m elf64_s390" + ;; + sparc*-*linux*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + +*-*-sco3.2v5*) + # On SCO OpenServer 5, we need -belf to get full-featured binaries. + SAVE_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -belf" + AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf, + [AC_LANG_PUSH(C) + AC_TRY_LINK([],[],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no]) + AC_LANG_POP]) + if test x"$lt_cv_cc_needs_belf" != x"yes"; then + # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf + CFLAGS="$SAVE_CFLAGS" + fi + ;; +sparc*-*solaris*) + # Find out which ABI we are using. 
+ echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.o` in + *64-bit*) + case $lt_cv_prog_gnu_ld in + yes*) LD="${LD-ld} -m elf64_sparc" ;; + *) LD="${LD-ld} -64" ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + +AC_PROVIDE_IFELSE([AC_LIBTOOL_WIN32_DLL], +[*-*-cygwin* | *-*-mingw* | *-*-pw32*) + AC_CHECK_TOOL(DLLTOOL, dlltool, false) + AC_CHECK_TOOL(AS, as, false) + AC_CHECK_TOOL(OBJDUMP, objdump, false) + ;; + ]) +esac + +need_locks="$enable_libtool_lock" + +])# _LT_AC_LOCK + + +# AC_LIBTOOL_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, +# [OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE]) +# ---------------------------------------------------------------- +# Check whether the given compiler option works +AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], +[AC_REQUIRE([LT_AC_PROG_SED]) +AC_CACHE_CHECK([$1], [$2], + [$2=no + ifelse([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4]) + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$3" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&AS_MESSAGE_LOG_FD + echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + $2=yes + fi + fi + $rm conftest* +]) + +if test x"[$]$2" = xyes; then + ifelse([$5], , :, [$5]) +else + ifelse([$6], , :, [$6]) +fi +])# AC_LIBTOOL_COMPILER_OPTION + + +# AC_LIBTOOL_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, +# [ACTION-SUCCESS], [ACTION-FAILURE]) +# ------------------------------------------------------------ +# Check whether the given compiler option works +AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], +[AC_CACHE_CHECK([$1], [$2], + [$2=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $3" + printf "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. 
+ cat conftest.err 1>&AS_MESSAGE_LOG_FD + $echo "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + $2=yes + fi + else + $2=yes + fi + fi + $rm conftest* + LDFLAGS="$save_LDFLAGS" +]) + +if test x"[$]$2" = xyes; then + ifelse([$4], , :, [$4]) +else + ifelse([$5], , :, [$5]) +fi +])# AC_LIBTOOL_LINKER_OPTION + + +# AC_LIBTOOL_SYS_MAX_CMD_LEN +# -------------------------- +AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], +[# find the maximum length of command line arguments +AC_MSG_CHECKING([the maximum length of command line arguments]) +AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl + i=0 + teststring="ABCD" + + case $build_os in + msdosdjgpp*) + # On DJGPP, this test can blow up pretty badly due to problems in libc + # (any single argument exceeding 2000 bytes causes a buffer overrun + # during glob expansion). Even if it were fixed, the result of this + # check would be larger than it should be. + lt_cv_sys_max_cmd_len=12288; # 12K is about right + ;; + + gnu*) + # Under GNU Hurd, this test is not required because there is + # no limit to the length of command line arguments. + # Libtool will interpret -1 as no limit whatsoever + lt_cv_sys_max_cmd_len=-1; + ;; + + cygwin* | mingw*) + # On Win9x/ME, this test blows up -- it succeeds, but takes + # about 5 minutes as the teststring grows exponentially. + # Worse, since 9x/ME are not pre-emptively multitasking, + # you end up with a "frozen" computer, even though with patience + # the test eventually succeeds (with a max line length of 256k). + # Instead, let's just punt: use the minimum linelength reported by + # all of the supported platforms: 8192 (on NT/2K/XP). + lt_cv_sys_max_cmd_len=8192; + ;; + + amigaos*) + # On AmigaOS with pdksh, this test takes hours, literally. + # So we just punt and use a minimum line length of 8192. + lt_cv_sys_max_cmd_len=8192; + ;; + + netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) + # This has been around since 386BSD, at least. Likely further. + if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` + elif test -x /usr/sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` + else + lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs + fi + # And add a safety zone + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + ;; + + interix*) + # We know the value 262144 and hardcode it with a safety zone (like BSD) + lt_cv_sys_max_cmd_len=196608 + ;; + + osf*) + # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure + # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not + # nice to cause kernel panics so lets avoid the loop below. + # First set a reasonable default. + lt_cv_sys_max_cmd_len=16384 + # + if test -x /sbin/sysconfig; then + case `/sbin/sysconfig -q proc exec_disable_arg_limit` in + *1*) lt_cv_sys_max_cmd_len=-1 ;; + esac + fi + ;; + sco3.2v5*) + lt_cv_sys_max_cmd_len=102400 + ;; + sysv5* | sco5v6* | sysv4.2uw2*) + kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` + if test -n "$kargmax"; then + lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[[ ]]//'` + else + lt_cv_sys_max_cmd_len=32768 + fi + ;; + *) + # If test is not a shell built-in, we'll probably end up computing a + # maximum length that is only half of the actual maximum length, but + # we can't tell. 
+ SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}}
+ while (test "X"`$SHELL [$]0 --fallback-echo "X$teststring" 2>/dev/null` \
+ = "XX$teststring") >/dev/null 2>&1 &&
+ new_result=`expr "X$teststring" : ".*" 2>&1` &&
+ lt_cv_sys_max_cmd_len=$new_result &&
+ test $i != 17 # 1/2 MB should be enough
+ do
+ i=`expr $i + 1`
+ teststring=$teststring$teststring
+ done
+ teststring=
+ # Add a significant safety factor because C++ compilers can tack on massive
+ # amounts of additional arguments before passing them to the linker.
+ # It appears as though 1/2 is a usable value.
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2`
+ ;;
+ esac
+])
+if test -n $lt_cv_sys_max_cmd_len ; then
+ AC_MSG_RESULT($lt_cv_sys_max_cmd_len)
+else
+ AC_MSG_RESULT(none)
+fi
+])# AC_LIBTOOL_SYS_MAX_CMD_LEN
+
+
+# _LT_AC_CHECK_DLFCN
+# ------------------
+AC_DEFUN([_LT_AC_CHECK_DLFCN],
+[AC_CHECK_HEADERS(dlfcn.h)dnl
+])# _LT_AC_CHECK_DLFCN
+
+
+# _LT_AC_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE,
+# ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING)
+# ---------------------------------------------------------------------
+AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF],
+[AC_REQUIRE([_LT_AC_CHECK_DLFCN])dnl
+if test "$cross_compiling" = yes; then :
+ [$4]
+else
+ lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+ lt_status=$lt_dlunknown
+ cat > conftest.$ac_ext <<EOF
+[#line __oline__ "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+# define LT_DLGLOBAL RTLD_GLOBAL
+#else
+# ifdef DL_GLOBAL
+# define LT_DLGLOBAL DL_GLOBAL
+# else
+# define LT_DLGLOBAL 0
+# endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW in the command line if we
+ find out it does not work in some platform. */
+#ifndef LT_DLLAZY_OR_NOW
+# ifdef RTLD_LAZY
+# define LT_DLLAZY_OR_NOW RTLD_LAZY
+# else
+# ifdef DL_LAZY
+# define LT_DLLAZY_OR_NOW DL_LAZY
+# else
+# ifdef RTLD_NOW
+# define LT_DLLAZY_OR_NOW RTLD_NOW
+# else
+# ifdef DL_NOW
+# define LT_DLLAZY_OR_NOW DL_NOW
+# else
+# define LT_DLLAZY_OR_NOW 0
+# endif
+# endif
+# endif
+# endif
+#endif
+
+#ifdef __cplusplus
+extern "C" void exit (int);
+#endif
+
+void fnord() { int i=42;}
+int main ()
+{
+ void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+ int status = $lt_dlunknown;
+
+ if (self)
+ {
+ if (dlsym (self,"fnord")) status = $lt_dlno_uscore;
+ else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore;
+ /* dlclose (self); */
+ }
+ else
+ puts (dlerror ());
+
+ exit (status);
+}]
+EOF
+ if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} 2>/dev/null; then
+ (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null
+ lt_status=$?
+ case x$lt_status in + x$lt_dlno_uscore) $1 ;; + x$lt_dlneed_uscore) $2 ;; + x$lt_dlunknown|x*) $3 ;; + esac + else : + # compilation failed + $3 + fi +fi +rm -fr conftest* +])# _LT_AC_TRY_DLOPEN_SELF + + +# AC_LIBTOOL_DLOPEN_SELF +# ---------------------- +AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], +[AC_REQUIRE([_LT_AC_CHECK_DLFCN])dnl +if test "x$enable_dlopen" != xyes; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in + beos*) + lt_cv_dlopen="load_add_on" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32*) + lt_cv_dlopen="LoadLibrary" + lt_cv_dlopen_libs= + ;; + + cygwin*) + lt_cv_dlopen="dlopen" + lt_cv_dlopen_libs= + ;; + + darwin*) + # if libdl is installed we need to link against it + AC_CHECK_LIB([dl], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],[ + lt_cv_dlopen="dyld" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ]) + ;; + + *) + AC_CHECK_FUNC([shl_load], + [lt_cv_dlopen="shl_load"], + [AC_CHECK_LIB([dld], [shl_load], + [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-dld"], + [AC_CHECK_FUNC([dlopen], + [lt_cv_dlopen="dlopen"], + [AC_CHECK_LIB([dl], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"], + [AC_CHECK_LIB([svld], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"], + [AC_CHECK_LIB([dld], [dld_link], + [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-dld"]) + ]) + ]) + ]) + ]) + ]) + ;; + esac + + if test "x$lt_cv_dlopen" != xno; then + enable_dlopen=yes + else + enable_dlopen=no + fi + + case $lt_cv_dlopen in + dlopen) + save_CPPFLAGS="$CPPFLAGS" + test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" + + save_LDFLAGS="$LDFLAGS" + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" + + save_LIBS="$LIBS" + LIBS="$lt_cv_dlopen_libs $LIBS" + + AC_CACHE_CHECK([whether a program can dlopen itself], + lt_cv_dlopen_self, [dnl + _LT_AC_TRY_DLOPEN_SELF( + lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes, + lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross) + ]) + + if test "x$lt_cv_dlopen_self" = xyes; then + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" + AC_CACHE_CHECK([whether a statically linked program can dlopen itself], + lt_cv_dlopen_self_static, [dnl + _LT_AC_TRY_DLOPEN_SELF( + lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes, + lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross) + ]) + fi + + CPPFLAGS="$save_CPPFLAGS" + LDFLAGS="$save_LDFLAGS" + LIBS="$save_LIBS" + ;; + esac + + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac + + case $lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac +fi +])# AC_LIBTOOL_DLOPEN_SELF + + +# AC_LIBTOOL_PROG_CC_C_O([TAGNAME]) +# --------------------------------- +# Check to see if options -c and -o are simultaneously supported by compiler +AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O], +[AC_REQUIRE([_LT_AC_SYS_COMPILER])dnl +AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext], + [_LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)], + [_LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no + $rm -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word 
containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&AS_MESSAGE_LOG_FD + echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + _LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes + fi + fi + chmod u+w . 2>&AS_MESSAGE_LOG_FD + $rm conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files + $rm out/* && rmdir out + cd .. + rmdir conftest + $rm conftest* +]) +])# AC_LIBTOOL_PROG_CC_C_O + + +# AC_LIBTOOL_SYS_HARD_LINK_LOCKS([TAGNAME]) +# ----------------------------------------- +# Check to see if we can do hard links to lock some files if needed +AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], +[AC_REQUIRE([_LT_AC_LOCK])dnl + +hard_links="nottested" +if test "$_LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + AC_MSG_CHECKING([if we can lock with hard links]) + hard_links=yes + $rm conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + AC_MSG_RESULT([$hard_links]) + if test "$hard_links" = no; then + AC_MSG_WARN([`$CC' does not support `-c -o', so `make -j' may be unsafe]) + need_locks=warn + fi +else + need_locks=no +fi +])# AC_LIBTOOL_SYS_HARD_LINK_LOCKS + + +# AC_LIBTOOL_OBJDIR +# ----------------- +AC_DEFUN([AC_LIBTOOL_OBJDIR], +[AC_CACHE_CHECK([for objdir], [lt_cv_objdir], +[rm -f .libs 2>/dev/null +mkdir .libs 2>/dev/null +if test -d .libs; then + lt_cv_objdir=.libs +else + # MS-DOS does not allow filenames that begin with a dot. + lt_cv_objdir=_libs +fi +rmdir .libs 2>/dev/null]) +objdir=$lt_cv_objdir +])# AC_LIBTOOL_OBJDIR + + +# AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH([TAGNAME]) +# ---------------------------------------------- +# Check hardcoding attributes. +AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], +[AC_MSG_CHECKING([how to hardcode library paths into programs]) +_LT_AC_TAGVAR(hardcode_action, $1)= +if test -n "$_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)" || \ + test -n "$_LT_AC_TAGVAR(runpath_var, $1)" || \ + test "X$_LT_AC_TAGVAR(hardcode_automatic, $1)" = "Xyes" ; then + + # We can hardcode non-existant directories. 
+ if test "$_LT_AC_TAGVAR(hardcode_direct, $1)" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, $1)" != no && + test "$_LT_AC_TAGVAR(hardcode_minus_L, $1)" != no; then + # Linking always hardcodes the temporary library directory. + _LT_AC_TAGVAR(hardcode_action, $1)=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + _LT_AC_TAGVAR(hardcode_action, $1)=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + _LT_AC_TAGVAR(hardcode_action, $1)=unsupported +fi +AC_MSG_RESULT([$_LT_AC_TAGVAR(hardcode_action, $1)]) + +if test "$_LT_AC_TAGVAR(hardcode_action, $1)" = relink; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi +])# AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH + + +# AC_LIBTOOL_SYS_LIB_STRIP +# ------------------------ +AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP], +[striplib= +old_striplib= +AC_MSG_CHECKING([whether stripping libraries is possible]) +if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + AC_MSG_RESULT([yes]) +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP" ; then + striplib="$STRIP -x" + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) +fi + ;; + *) + AC_MSG_RESULT([no]) + ;; + esac +fi +])# AC_LIBTOOL_SYS_LIB_STRIP + + +# AC_LIBTOOL_SYS_DYNAMIC_LINKER +# ----------------------------- +# PORTME Fill in your ld.so characteristics +AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER], +[AC_MSG_CHECKING([dynamic linker characteristics]) +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';' >/dev/null ; then + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. 
+ soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix4* | aix5*) + version_type=linux + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[[01]] | aix4.[[01]].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[[45]]*) + version_type=linux + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$host_os in + yes,cygwin* | yes,mingw* | yes,pw32*) + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. 
$dir/'\''\${base_file}'\''i;echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $rm \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" + ;; + mingw*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | [grep ';[c-zC-Z]:/' >/dev/null]; then + # It is most probably a Windows format PATH printed by + # mingw gcc, but we are running on Cygwin. Gcc prints its search + # path with ; separators, and with drive letters. We can handle the + # drive letters (cygwin fileutils understands them), so leave them, + # especially as we might pass files found there to a mingw objdump, + # which wouldn't understand a cygwinified path. Ahh. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + ;; + esac + ;; + + linux*) + if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + supports_anon_versioning=no + case `$LD -v 2>/dev/null` in + *\ [01].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + if test $supports_anon_versioning = yes; then + archive_expsym_cmds='$echo "{ global:" > $output_objdir/$libname.ver~ +cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ +$echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + else + $archive_expsym_cmds="$archive_cmds" + fi + else + ld_shlibs=no + fi + ;; + + *) + library_names_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext} $libname.lib' + ;; + esac + dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + # Apple's gcc prints 'gcc -print-search-dirs' doesn't operate the same. 
+ if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + else + sys_lib_search_path_spec='/lib /usr/lib /usr/local/lib' + fi + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd1*) + dynamic_linker=no + ;; + +kfreebsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. + if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[[123]]*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + # Handle Gentoo/FreeBSD as it was Linux + case $host_vendor in + gentoo) + version_type=linux ;; + *) + version_type=freebsd-$objformat ;; + esac + + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + linux) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + need_lib_prefix=no + need_version=no + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[[01]]* | freebsdelf3.[[01]]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \ + freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + freebsd*) # from 4.6 on + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555. + postinstall_cmds='chmod 555 $lib' + ;; + +interix3*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be Linux ELF. 
+linux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. + dynamic_linker='GNU/Linux ld.so' + ;; + +knetbsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +nto-qnx*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +openbsd*) + version_type=sunos + sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no + # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
+ case $host_os in + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; + esac + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[[89]] | openbsd2.[[89]].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +solaris*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + export_dynamic_flag_spec='${wl}-Blargedynsym' + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=freebsd-elf + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + if test "$with_gnu_ld" = yes; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + shlibpath_overrides_runpath=no + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + shlibpath_overrides_runpath=yes + case $host_os in + 
sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +uts4*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +AC_MSG_RESULT([$dynamic_linker]) +test "$dynamic_linker" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi +])# AC_LIBTOOL_SYS_DYNAMIC_LINKER + + +# _LT_AC_TAGCONFIG +# ---------------- +AC_DEFUN([_LT_AC_TAGCONFIG], +[AC_ARG_WITH([tags], + [AC_HELP_STRING([--with-tags@<:@=TAGS@:>@], + [include additional configurations @<:@automatic@:>@])], + [tagnames="$withval"]) + +if test -f "$ltmain" && test -n "$tagnames"; then + if test ! -f "${ofile}"; then + AC_MSG_WARN([output file `$ofile' does not exist]) + fi + + if test -z "$LTCC"; then + eval "`$SHELL ${ofile} --config | grep '^LTCC='`" + if test -z "$LTCC"; then + AC_MSG_WARN([output file `$ofile' does not look like a libtool script]) + else + AC_MSG_WARN([using `LTCC=$LTCC', extracted from `$ofile']) + fi + fi + if test -z "$LTCFLAGS"; then + eval "`$SHELL ${ofile} --config | grep '^LTCFLAGS='`" + fi + + # Extract list of available tagged configurations in $ofile. + # Note that this assumes the entire list is on one line. + available_tags=`grep "^available_tags=" "${ofile}" | $SED -e 's/available_tags=\(.*$\)/\1/' -e 's/\"//g'` + + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for tagname in $tagnames; do + IFS="$lt_save_ifs" + # Check whether tagname contains only valid characters + case `$echo "X$tagname" | $Xsed -e 's:[[-_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890,/]]::g'` in + "") ;; + *) AC_MSG_ERROR([invalid tag name: $tagname]) + ;; + esac + + if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$" < "${ofile}" > /dev/null + then + AC_MSG_ERROR([tag name \"$tagname\" already exists]) + fi + + # Update the list of available tags. + if test -n "$tagname"; then + echo appending configuration tag \"$tagname\" to $ofile + + case $tagname in + CXX) + if test -n "$CXX" && ( test "X$CXX" != "Xno" && + ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || + (test "X$CXX" != "Xg++"))) ; then + AC_LIBTOOL_LANG_CXX_CONFIG + else + tagname="" + fi + ;; + + F77) + if test -n "$F77" && test "X$F77" != "Xno"; then + AC_LIBTOOL_LANG_F77_CONFIG + else + tagname="" + fi + ;; + + GCJ) + if test -n "$GCJ" && test "X$GCJ" != "Xno"; then + AC_LIBTOOL_LANG_GCJ_CONFIG + else + tagname="" + fi + ;; + + RC) + AC_LIBTOOL_LANG_RC_CONFIG + ;; + + *) + AC_MSG_ERROR([Unsupported tag name: $tagname]) + ;; + esac + + # Append the new tag name to the list of available tags. + if test -n "$tagname" ; then + available_tags="$available_tags $tagname" + fi + fi + done + IFS="$lt_save_ifs" + + # Now substitute the updated list of available tags. 
+ if eval "sed -e 's/^available_tags=.*\$/available_tags=\"$available_tags\"/' \"$ofile\" > \"${ofile}T\""; then + mv "${ofile}T" "$ofile" + chmod +x "$ofile" + else + rm -f "${ofile}T" + AC_MSG_ERROR([unable to update list of available tagged configurations.]) + fi +fi +])# _LT_AC_TAGCONFIG + + +# AC_LIBTOOL_DLOPEN +# ----------------- +# enable checks for dlopen support +AC_DEFUN([AC_LIBTOOL_DLOPEN], + [AC_BEFORE([$0],[AC_LIBTOOL_SETUP]) +])# AC_LIBTOOL_DLOPEN + + +# AC_LIBTOOL_WIN32_DLL +# -------------------- +# declare package support for building win32 DLLs +AC_DEFUN([AC_LIBTOOL_WIN32_DLL], +[AC_BEFORE([$0], [AC_LIBTOOL_SETUP]) +])# AC_LIBTOOL_WIN32_DLL + + +# AC_ENABLE_SHARED([DEFAULT]) +# --------------------------- +# implement the --enable-shared flag +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. +AC_DEFUN([AC_ENABLE_SHARED], +[define([AC_ENABLE_SHARED_DEFAULT], ifelse($1, no, no, yes))dnl +AC_ARG_ENABLE([shared], + [AC_HELP_STRING([--enable-shared@<:@=PKGS@:>@], + [build shared libraries @<:@default=]AC_ENABLE_SHARED_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_shared=yes ;; + no) enable_shared=no ;; + *) + enable_shared=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_shared=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_shared=]AC_ENABLE_SHARED_DEFAULT) +])# AC_ENABLE_SHARED + + +# AC_DISABLE_SHARED +# ----------------- +# set the default shared flag to --disable-shared +AC_DEFUN([AC_DISABLE_SHARED], +[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl +AC_ENABLE_SHARED(no) +])# AC_DISABLE_SHARED + + +# AC_ENABLE_STATIC([DEFAULT]) +# --------------------------- +# implement the --enable-static flag +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. +AC_DEFUN([AC_ENABLE_STATIC], +[define([AC_ENABLE_STATIC_DEFAULT], ifelse($1, no, no, yes))dnl +AC_ARG_ENABLE([static], + [AC_HELP_STRING([--enable-static@<:@=PKGS@:>@], + [build static libraries @<:@default=]AC_ENABLE_STATIC_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_static=yes ;; + no) enable_static=no ;; + *) + enable_static=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_static=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_static=]AC_ENABLE_STATIC_DEFAULT) +])# AC_ENABLE_STATIC + + +# AC_DISABLE_STATIC +# ----------------- +# set the default static flag to --disable-static +AC_DEFUN([AC_DISABLE_STATIC], +[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl +AC_ENABLE_STATIC(no) +])# AC_DISABLE_STATIC + + +# AC_ENABLE_FAST_INSTALL([DEFAULT]) +# --------------------------------- +# implement the --enable-fast-install flag +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. +AC_DEFUN([AC_ENABLE_FAST_INSTALL], +[define([AC_ENABLE_FAST_INSTALL_DEFAULT], ifelse($1, no, no, yes))dnl +AC_ARG_ENABLE([fast-install], + [AC_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@], + [optimize for fast installation @<:@default=]AC_ENABLE_FAST_INSTALL_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_fast_install=yes ;; + no) enable_fast_install=no ;; + *) + enable_fast_install=no + # Look at the argument we got. We use all the common list separators. 
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_fast_install=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_fast_install=]AC_ENABLE_FAST_INSTALL_DEFAULT) +])# AC_ENABLE_FAST_INSTALL + + +# AC_DISABLE_FAST_INSTALL +# ----------------------- +# set the default to --disable-fast-install +AC_DEFUN([AC_DISABLE_FAST_INSTALL], +[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl +AC_ENABLE_FAST_INSTALL(no) +])# AC_DISABLE_FAST_INSTALL + + +# AC_LIBTOOL_PICMODE([MODE]) +# -------------------------- +# implement the --with-pic flag +# MODE is either `yes' or `no'. If omitted, it defaults to `both'. +AC_DEFUN([AC_LIBTOOL_PICMODE], +[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl +pic_mode=ifelse($#,1,$1,default) +])# AC_LIBTOOL_PICMODE + + +# AC_PROG_EGREP +# ------------- +# This is predefined starting with Autoconf 2.54, so this conditional +# definition can be removed once we require Autoconf 2.54 or later. +m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP], +[AC_CACHE_CHECK([for egrep], [ac_cv_prog_egrep], + [if echo a | (grep -E '(a|b)') >/dev/null 2>&1 + then ac_cv_prog_egrep='grep -E' + else ac_cv_prog_egrep='egrep' + fi]) + EGREP=$ac_cv_prog_egrep + AC_SUBST([EGREP]) +])]) + + +# AC_PATH_TOOL_PREFIX +# ------------------- +# find a file program which can recognise shared library +AC_DEFUN([AC_PATH_TOOL_PREFIX], +[AC_REQUIRE([AC_PROG_EGREP])dnl +AC_MSG_CHECKING([for $1]) +AC_CACHE_VAL(lt_cv_path_MAGIC_CMD, +[case $MAGIC_CMD in +[[\\/*] | ?:[\\/]*]) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR +dnl $ac_dummy forces splitting on constant user-supplied paths. +dnl POSIX.2 word splitting is done only on the output of word expansions, +dnl not every word. This closes a longstanding sh security hole. + ac_dummy="ifelse([$2], , $PATH, [$2])" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f $ac_dir/$1; then + lt_cv_path_MAGIC_CMD="$ac_dir/$1" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. 
Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +EOF + fi ;; + esac + fi + break + fi + done + IFS="$lt_save_ifs" + MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; +esac]) +MAGIC_CMD="$lt_cv_path_MAGIC_CMD" +if test -n "$MAGIC_CMD"; then + AC_MSG_RESULT($MAGIC_CMD) +else + AC_MSG_RESULT(no) +fi +])# AC_PATH_TOOL_PREFIX + + +# AC_PATH_MAGIC +# ------------- +# find a file program which can recognise a shared library +AC_DEFUN([AC_PATH_MAGIC], +[AC_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH) +if test -z "$lt_cv_path_MAGIC_CMD"; then + if test -n "$ac_tool_prefix"; then + AC_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH) + else + MAGIC_CMD=: + fi +fi +])# AC_PATH_MAGIC + + +# AC_PROG_LD +# ---------- +# find the pathname to the GNU or non-GNU linker +AC_DEFUN([AC_PROG_LD], +[AC_ARG_WITH([gnu-ld], + [AC_HELP_STRING([--with-gnu-ld], + [assume the C compiler uses GNU ld @<:@default=no@:>@])], + [test "$withval" = no || with_gnu_ld=yes], + [with_gnu_ld=no]) +AC_REQUIRE([LT_AC_PROG_SED])dnl +AC_REQUIRE([AC_PROG_CC])dnl +AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_CANONICAL_BUILD])dnl +ac_prog=ld +if test "$GCC" = yes; then + # Check if gcc -print-prog-name=ld gives a path. + AC_MSG_CHECKING([for ld used by $CC]) + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths. + [[\\/]]* | ?:[[\\/]]*) + re_direlt='/[[^/]][[^/]]*/\.\./' + # Canonicalize the pathname of ld + ac_prog=`echo $ac_prog| $SED 's%\\\\%/%g'` + while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do + ac_prog=`echo $ac_prog| $SED "s%$re_direlt%/%"` + done + test -z "$LD" && LD="$ac_prog" + ;; + "") + # If it fails, then pretend we aren't using GCC. + ac_prog=ld + ;; + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown + ;; + esac +elif test "$with_gnu_ld" = yes; then + AC_MSG_CHECKING([for GNU ld]) +else + AC_MSG_CHECKING([for non-GNU ld]) +fi +AC_CACHE_VAL(lt_cv_path_LD, +[if test -z "$LD"; then + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + lt_cv_path_LD="$ac_dir/$ac_prog" + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some variants of GNU ld only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. + case `"$lt_cv_path_LD" -v 2>&1 &1 /dev/null; then + case $host_cpu in + i*86 ) + # Not sure whether the presence of OpenBSD here was a mistake. + # Let's accept both of them until this is cleared up. 
+ lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` + ;; + esac + else + lt_cv_deplibs_check_method=pass_all + fi + ;; + +gnu*) + lt_cv_deplibs_check_method=pass_all + ;; + +hpux10.20* | hpux11*) + lt_cv_file_magic_cmd=/usr/bin/file + case $host_cpu in + ia64*) + lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64' + lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so + ;; + hppa*64*) + [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - PA-RISC [0-9].[0-9]'] + lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl + ;; + *) + lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]].[[0-9]]) shared library' + lt_cv_file_magic_test_file=/usr/lib/libc.sl + ;; + esac + ;; + +interix3*) + # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$' + ;; + +irix5* | irix6* | nonstopux*) + case $LD in + *-32|*"-32 ") libmagic=32-bit;; + *-n32|*"-n32 ") libmagic=N32;; + *-64|*"-64 ") libmagic=64-bit;; + *) libmagic=never-match;; + esac + lt_cv_deplibs_check_method=pass_all + ;; + +# This must be Linux ELF. +linux*) + lt_cv_deplibs_check_method=pass_all + ;; + +netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$' + fi + ;; + +newos6*) + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=/usr/lib/libnls.so + ;; + +nto-qnx*) + lt_cv_deplibs_check_method=unknown + ;; + +openbsd*) + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' + fi + ;; + +osf3* | osf4* | osf5*) + lt_cv_deplibs_check_method=pass_all + ;; + +solaris*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv4 | sysv4.3*) + case $host_vendor in + motorola) + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]' + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` + ;; + ncr) + lt_cv_deplibs_check_method=pass_all + ;; + sequent) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' + ;; + sni) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib" + lt_cv_file_magic_test_file=/lib/libc.so + ;; + siemens) + lt_cv_deplibs_check_method=pass_all + ;; + pc) + lt_cv_deplibs_check_method=pass_all + ;; + esac + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + lt_cv_deplibs_check_method=pass_all + ;; +esac +]) +file_magic_cmd=$lt_cv_file_magic_cmd +deplibs_check_method=$lt_cv_deplibs_check_method +test -z "$deplibs_check_method" && deplibs_check_method=unknown +])# AC_DEPLIBS_CHECK_METHOD + + +# AC_PROG_NM +# ---------- +# find the pathname to a BSD-compatible name lister +AC_DEFUN([AC_PROG_NM], 
+[AC_CACHE_CHECK([for BSD-compatible nm], lt_cv_path_NM, +[if test -n "$NM"; then + # Let the user override the test. + lt_cv_path_NM="$NM" +else + lt_nm_to_check="${ac_tool_prefix}nm" + if test -n "$ac_tool_prefix" && test "$build" = "$host"; then + lt_nm_to_check="$lt_nm_to_check nm" + fi + for lt_tmp_nm in $lt_nm_to_check; do + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + tmp_nm="$ac_dir/$lt_tmp_nm" + if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then + # Check to see if the nm accepts a BSD-compat flag. + # Adding the `sed 1q' prevents false positives on HP-UX, which says: + # nm: unknown option "B" ignored + # Tru64's nm complains that /dev/null is an invalid object file + case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in + */dev/null* | *'Invalid file or object type'*) + lt_cv_path_NM="$tmp_nm -B" + break + ;; + *) + case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in + */dev/null*) + lt_cv_path_NM="$tmp_nm -p" + break + ;; + *) + lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but + continue # so that we can try to find one that supports BSD flags + ;; + esac + ;; + esac + fi + done + IFS="$lt_save_ifs" + done + test -z "$lt_cv_path_NM" && lt_cv_path_NM=nm +fi]) +NM="$lt_cv_path_NM" +])# AC_PROG_NM + + +# AC_CHECK_LIBM +# ------------- +# check for math library +AC_DEFUN([AC_CHECK_LIBM], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +LIBM= +case $host in +*-*-beos* | *-*-cygwin* | *-*-pw32* | *-*-darwin*) + # These system don't have libm, or don't need it + ;; +*-ncr-sysv4.3*) + AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM="-lmw") + AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm") + ;; +*) + AC_CHECK_LIB(m, cos, LIBM="-lm") + ;; +esac +])# AC_CHECK_LIBM + + +# AC_LIBLTDL_CONVENIENCE([DIRECTORY]) +# ----------------------------------- +# sets LIBLTDL to the link flags for the libltdl convenience library and +# LTDLINCL to the include flags for the libltdl header and adds +# --enable-ltdl-convenience to the configure arguments. Note that +# AC_CONFIG_SUBDIRS is not called here. If DIRECTORY is not provided, +# it is assumed to be `libltdl'. LIBLTDL will be prefixed with +# '${top_builddir}/' and LTDLINCL will be prefixed with '${top_srcdir}/' +# (note the single quotes!). If your package is not flat and you're not +# using automake, define top_builddir and top_srcdir appropriately in +# the Makefiles. +AC_DEFUN([AC_LIBLTDL_CONVENIENCE], +[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl + case $enable_ltdl_convenience in + no) AC_MSG_ERROR([this package needs a convenience libltdl]) ;; + "") enable_ltdl_convenience=yes + ac_configure_args="$ac_configure_args --enable-ltdl-convenience" ;; + esac + LIBLTDL='${top_builddir}/'ifelse($#,1,[$1],['libltdl'])/libltdlc.la + LTDLINCL='-I${top_srcdir}/'ifelse($#,1,[$1],['libltdl']) + # For backwards non-gettext consistent compatibility... + INCLTDL="$LTDLINCL" +])# AC_LIBLTDL_CONVENIENCE + + +# AC_LIBLTDL_INSTALLABLE([DIRECTORY]) +# ----------------------------------- +# sets LIBLTDL to the link flags for the libltdl installable library and +# LTDLINCL to the include flags for the libltdl header and adds +# --enable-ltdl-install to the configure arguments. Note that +# AC_CONFIG_SUBDIRS is not called here. If DIRECTORY is not provided, +# and an installed libltdl is not found, it is assumed to be `libltdl'. +# LIBLTDL will be prefixed with '${top_builddir}/'# and LTDLINCL with +# '${top_srcdir}/' (note the single quotes!). 
If your package is not +# flat and you're not using automake, define top_builddir and top_srcdir +# appropriately in the Makefiles. +# In the future, this macro may have to be called after AC_PROG_LIBTOOL. +AC_DEFUN([AC_LIBLTDL_INSTALLABLE], +[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl + AC_CHECK_LIB(ltdl, lt_dlinit, + [test x"$enable_ltdl_install" != xyes && enable_ltdl_install=no], + [if test x"$enable_ltdl_install" = xno; then + AC_MSG_WARN([libltdl not installed, but installation disabled]) + else + enable_ltdl_install=yes + fi + ]) + if test x"$enable_ltdl_install" = x"yes"; then + ac_configure_args="$ac_configure_args --enable-ltdl-install" + LIBLTDL='${top_builddir}/'ifelse($#,1,[$1],['libltdl'])/libltdl.la + LTDLINCL='-I${top_srcdir}/'ifelse($#,1,[$1],['libltdl']) + else + ac_configure_args="$ac_configure_args --enable-ltdl-install=no" + LIBLTDL="-lltdl" + LTDLINCL= + fi + # For backwards non-gettext consistent compatibility... + INCLTDL="$LTDLINCL" +])# AC_LIBLTDL_INSTALLABLE + + +# AC_LIBTOOL_CXX +# -------------- +# enable support for C++ libraries +AC_DEFUN([AC_LIBTOOL_CXX], +[AC_REQUIRE([_LT_AC_LANG_CXX]) +])# AC_LIBTOOL_CXX + + +# _LT_AC_LANG_CXX +# --------------- +AC_DEFUN([_LT_AC_LANG_CXX], +[AC_REQUIRE([AC_PROG_CXX]) +AC_REQUIRE([_LT_AC_PROG_CXXCPP]) +_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}CXX]) +])# _LT_AC_LANG_CXX + +# _LT_AC_PROG_CXXCPP +# ------------------ +AC_DEFUN([_LT_AC_PROG_CXXCPP], +[ +AC_REQUIRE([AC_PROG_CXX]) +if test -n "$CXX" && ( test "X$CXX" != "Xno" && + ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || + (test "X$CXX" != "Xg++"))) ; then + AC_PROG_CXXCPP +fi +])# _LT_AC_PROG_CXXCPP + +# AC_LIBTOOL_F77 +# -------------- +# enable support for Fortran 77 libraries +AC_DEFUN([AC_LIBTOOL_F77], +[AC_REQUIRE([_LT_AC_LANG_F77]) +])# AC_LIBTOOL_F77 + + +# _LT_AC_LANG_F77 +# --------------- +AC_DEFUN([_LT_AC_LANG_F77], +[AC_REQUIRE([AC_PROG_F77]) +_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}F77]) +])# _LT_AC_LANG_F77 + + +# AC_LIBTOOL_GCJ +# -------------- +# enable support for GCJ libraries +AC_DEFUN([AC_LIBTOOL_GCJ], +[AC_REQUIRE([_LT_AC_LANG_GCJ]) +])# AC_LIBTOOL_GCJ + + +# _LT_AC_LANG_GCJ +# --------------- +AC_DEFUN([_LT_AC_LANG_GCJ], +[AC_PROVIDE_IFELSE([AC_PROG_GCJ],[], + [AC_PROVIDE_IFELSE([A][M_PROG_GCJ],[], + [AC_PROVIDE_IFELSE([LT_AC_PROG_GCJ],[], + [ifdef([AC_PROG_GCJ],[AC_REQUIRE([AC_PROG_GCJ])], + [ifdef([A][M_PROG_GCJ],[AC_REQUIRE([A][M_PROG_GCJ])], + [AC_REQUIRE([A][C_PROG_GCJ_OR_A][M_PROG_GCJ])])])])])]) +_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}GCJ]) +])# _LT_AC_LANG_GCJ + + +# AC_LIBTOOL_RC +# ------------- +# enable support for Windows resource files +AC_DEFUN([AC_LIBTOOL_RC], +[AC_REQUIRE([LT_AC_PROG_RC]) +_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}RC]) +])# AC_LIBTOOL_RC + + +# AC_LIBTOOL_LANG_C_CONFIG +# ------------------------ +# Ensure that the configuration vars for the C compiler are +# suitably defined. Those variables are subsequently used by +# AC_LIBTOOL_CONFIG to write the compiler configuration to `libtool'. +AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG], [_LT_AC_LANG_C_CONFIG]) +AC_DEFUN([_LT_AC_LANG_C_CONFIG], +[lt_save_CC="$CC" +AC_LANG_PUSH(C) + +# Source file extension for C test sources. +ac_ext=c + +# Object file extension for compiled C test sources. 
+objext=o +_LT_AC_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;\n" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(){return(0);}\n' + +_LT_AC_SYS_COMPILER + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +AC_LIBTOOL_PROG_COMPILER_NO_RTTI($1) +AC_LIBTOOL_PROG_COMPILER_PIC($1) +AC_LIBTOOL_PROG_CC_C_O($1) +AC_LIBTOOL_SYS_HARD_LINK_LOCKS($1) +AC_LIBTOOL_PROG_LD_SHLIBS($1) +AC_LIBTOOL_SYS_DYNAMIC_LINKER($1) +AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH($1) +AC_LIBTOOL_SYS_LIB_STRIP +AC_LIBTOOL_DLOPEN_SELF + +# Report which library types will actually be built +AC_MSG_CHECKING([if libtool supports shared libraries]) +AC_MSG_RESULT([$can_build_shared]) + +AC_MSG_CHECKING([whether to build shared libraries]) +test "$can_build_shared" = "no" && enable_shared=no + +# On AIX, shared libraries and static libraries use the same namespace, and +# are all built from PIC. +case $host_os in +aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + +aix4* | aix5*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; +esac +AC_MSG_RESULT([$enable_shared]) + +AC_MSG_CHECKING([whether to build static libraries]) +# Make sure either enable_shared or enable_static is yes. +test "$enable_shared" = yes || enable_static=yes +AC_MSG_RESULT([$enable_static]) + +AC_LIBTOOL_CONFIG($1) + +AC_LANG_POP +CC="$lt_save_CC" +])# AC_LIBTOOL_LANG_C_CONFIG + + +# AC_LIBTOOL_LANG_CXX_CONFIG +# -------------------------- +# Ensure that the configuration vars for the C compiler are +# suitably defined. Those variables are subsequently used by +# AC_LIBTOOL_CONFIG to write the compiler configuration to `libtool'. +AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG], [_LT_AC_LANG_CXX_CONFIG(CXX)]) +AC_DEFUN([_LT_AC_LANG_CXX_CONFIG], +[AC_LANG_PUSH(C++) +AC_REQUIRE([AC_PROG_CXX]) +AC_REQUIRE([_LT_AC_PROG_CXXCPP]) + +_LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_AC_TAGVAR(allow_undefined_flag, $1)= +_LT_AC_TAGVAR(always_export_symbols, $1)=no +_LT_AC_TAGVAR(archive_expsym_cmds, $1)= +_LT_AC_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_AC_TAGVAR(hardcode_direct, $1)=no +_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= +_LT_AC_TAGVAR(hardcode_libdir_separator, $1)= +_LT_AC_TAGVAR(hardcode_minus_L, $1)=no +_LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=unsupported +_LT_AC_TAGVAR(hardcode_automatic, $1)=no +_LT_AC_TAGVAR(module_cmds, $1)= +_LT_AC_TAGVAR(module_expsym_cmds, $1)= +_LT_AC_TAGVAR(link_all_deplibs, $1)=unknown +_LT_AC_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_AC_TAGVAR(no_undefined_flag, $1)= +_LT_AC_TAGVAR(whole_archive_flag_spec, $1)= +_LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=no + +# Dependencies to place before and after the object being linked: +_LT_AC_TAGVAR(predep_objects, $1)= +_LT_AC_TAGVAR(postdep_objects, $1)= +_LT_AC_TAGVAR(predeps, $1)= +_LT_AC_TAGVAR(postdeps, $1)= +_LT_AC_TAGVAR(compiler_lib_search_path, $1)= + +# Source file extension for C++ test sources. +ac_ext=cpp + +# Object file extension for compiled C++ test sources. 
+objext=o +_LT_AC_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;\n" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }\n' + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. +_LT_AC_SYS_COMPILER + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +# Allow CC to be a program name with arguments. +lt_save_CC=$CC +lt_save_LD=$LD +lt_save_GCC=$GCC +GCC=$GXX +lt_save_with_gnu_ld=$with_gnu_ld +lt_save_path_LD=$lt_cv_path_LD +if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then + lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx +else + $as_unset lt_cv_prog_gnu_ld +fi +if test -n "${lt_cv_path_LDCXX+set}"; then + lt_cv_path_LD=$lt_cv_path_LDCXX +else + $as_unset lt_cv_path_LD +fi +test -z "${LDCXX+set}" || LD=$LDCXX +CC=${CXX-"c++"} +compiler=$CC +_LT_AC_TAGVAR(compiler, $1)=$CC +_LT_CC_BASENAME([$compiler]) + +# We don't want -fno-exception wen compiling C++ code, so set the +# no_builtin_flag separately +if test "$GXX" = yes; then + _LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' +else + _LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= +fi + +if test "$GXX" = yes; then + # Set up default GNU C++ configuration + + AC_PROG_LD + + # Check if GNU C++ uses GNU ld as the underlying linker, since the + # archiving commands below assume that GNU ld is being used. + if test "$with_gnu_ld" = yes; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + + # If archive_cmds runs LD, not CC, wlarc should be empty + # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to + # investigate it a little bit more. (MM) + wlarc='${wl}' + + # ancient GNU ld didn't support --whole-archive et. al. + if eval "`$CC -print-prog-name=ld` --help 2>&1" | \ + grep 'no-whole-archive' > /dev/null; then + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)= + fi + else + with_gnu_ld=no + wlarc= + + # A generic and very simple default shared library creation + # command for GNU C++ for the case where it uses the native + # linker, instead of GNU ld. If possible, this setting should + # overridden to take advantage of the native linker features on + # the platform it is being used on. + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + fi + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' + +else + GXX=no + with_gnu_ld=no + wlarc= +fi + +# PORTME: fill in a description of your system's C++ link characteristics +AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) +_LT_AC_TAGVAR(ld_shlibs, $1)=yes +case $host_os in + aix3*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + aix4* | aix5*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[[23]]|aix4.[[23]].*|aix5*) + for ld_flag in $LDFLAGS; do + case $ld_flag in + *-brtl*) + aix_use_runtimelinking=yes + break + ;; + esac + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + _LT_AC_TAGVAR(archive_cmds, $1)='' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + + if test "$GXX" = yes; then + case $host_os in aix4.[[012]]|aix4.[[012]].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && \ + strings "$collect2name" | grep resolve_lib_name >/dev/null + then + # We have reworked collect2 + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + else + # We have old collect2 + _LT_AC_TAGVAR(hardcode_direct, $1)=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)= + fi + ;; + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + _LT_AC_TAGVAR(always_export_symbols, $1)=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + _LT_AC_TAGVAR(allow_undefined_flag, $1)='-berok' + # Determine the default libpath from the value encoded in an empty executable. 
+ _LT_AC_SYS_LIBPATH_AIX + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + + _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' + _LT_AC_TAGVAR(allow_undefined_flag, $1)="-z nodefs" + _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an empty executable. + _LT_AC_SYS_LIBPATH_AIX + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + _LT_AC_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' + # Exported symbols can be pulled into shared objects from archives + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='$convenience' + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=yes + # This is similar to how AIX traditionally builds its shared libraries. + _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + beos*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + chorus*) + case $cc_basename in + *) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + cygwin* | mingw* | pw32*) + # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, + # as there is no search path for DLLs. + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_AC_TAGVAR(always_export_symbols, $1)=no + _LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + + if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... 
+ _LT_AC_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + darwin* | rhapsody*) + case $host_os in + rhapsody* | darwin1.[[012]]) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[[012]]) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; + esac + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_AC_TAGVAR(hardcode_direct, $1)=no + _LT_AC_TAGVAR(hardcode_automatic, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='' + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + + if test "$GXX" = yes ; then + lt_int_apple_cc_single_mod=no + output_verbose_link_cmd='echo' + if $CC -dumpspecs 2>&1 | $EGREP 'single_module' >/dev/null ; then + lt_int_apple_cc_single_mod=yes + fi + if test "X$lt_int_apple_cc_single_mod" = Xyes ; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + else + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + fi + _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + if test "X$lt_int_apple_cc_single_mod" = Xyes ; then + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + fi + _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + case $cc_basename in + xlc*) + output_verbose_link_cmd='echo' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib 
$libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + fi + ;; + + dgux*) + case $cc_basename in + ec++*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + ghcx*) + # Green Hills C++ Compiler + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + freebsd[[12]]*) + # C++ shared libraries reported to be fairly broken before switch to ELF + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + freebsd-elf*) + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + ;; + freebsd* | kfreebsd*-gnu | dragonfly*) + # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF + # conventions + _LT_AC_TAGVAR(ld_shlibs, $1)=yes + ;; + gnu*) + ;; + hpux9*) + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, + # but as the default + # location of the library. + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + aCC*) + _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | grep "[[-]]L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes; then + _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + hpux10*|hpux11*) + if test $with_gnu_ld = no; then + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + case $host_cpu in + hppa*64*|ia64*) + _LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='+b $libdir' + ;; + *) + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + ;; + esac + fi + case $host_cpu in + hppa*64*|ia64*) + _LT_AC_TAGVAR(hardcode_direct, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + *) + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, + # but as the default + # location of the library. + ;; + esac + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + aCC*) + case $host_cpu in + hppa*64*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | grep "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes; then + if test $with_gnu_ld = no; then + case $host_cpu in + hppa*64*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + fi + else + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + interix3*) + _LT_AC_TAGVAR(hardcode_direct, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + irix5* | irix6*) + case $cc_basename in + CC*) + # SGI C++ + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + + # Archives containing C++ object files must be created using + # "CC -ar", where "CC" is the IRIX C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. 
+ _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs' + ;; + *) + if test "$GXX" = yes; then + if test "$with_gnu_ld" = no; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` -o $lib' + fi + fi + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + ;; + esac + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + ;; + linux*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + _LT_AC_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | grep "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath,$libdir' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. + _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' + ;; + icpc*) + # Intel C++ + with_gnu_ld=yes + # version 8.0 and above of icpc choke on multiply defined symbols + # if we add $predep_objects and $postdep_objects, however 7.1 and + # earlier do not add the objects themselves. 
+ case `$CC -V 2>&1` in + *"Version 7."*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 8.0 or newer + tmp_idyn= + case $host_cpu in + ia64*) tmp_idyn=' -i_dynamic';; + esac + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + ;; + pgCC*) + # Portland Group C++ compiler + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' + ;; + cxx*) + # Compaq C++ + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' + + runpath_var=LD_RUN_PATH + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + esac + ;; + lynxos*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + m88k*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + mvs*) + case $cc_basename in + cxx*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' + wlarc= + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + fi + # Workaround some broken pre-1.5 toolchains + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' + ;; + openbsd2*) + # C++ shared libraries are fairly broken + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + openbsd*) + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + fi + output_verbose_link_cmd='echo' + ;; + osf3*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + _LT_AC_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. 
+ _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' + + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + cxx*) + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && echo ${wl}-set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld" | grep -v "ld:"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' + + else + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + osf4* | osf5*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + _LT_AC_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + # Archives containing C++ object files must be created using + # the KAI C++ compiler. 
+ _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + cxx*) + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ + echo "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname -Wl,-input -Wl,$lib.exp `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~ + $rm $lib.exp' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld" | grep -v "ld:"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' + + else + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + psos*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + lcc*) + # Lucid + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + solaris*) + case $cc_basename in + CC*) + # Sun C++ 4.2, 5.x and Centerline C++ + _LT_AC_TAGVAR(archive_cmds_need_lc,$1)=yes + _LT_AC_TAGVAR(no_undefined_flag, $1)=' -zdefs' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + # The C++ compiler is used as linker so we must use $wl + # flag to pass the commands to the underlying system + # linker. We must also pass each convience library through + # to the system linker between allextract/defaultextract. + # The C++ compiler will combine linker options so we + # cannot just pass the convience library names through + # without $wl. + # Supported since Solaris 2.6 (maybe 2.5.1?) + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}-z ${wl}defaultextract' + ;; + esac + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + + output_verbose_link_cmd='echo' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' + ;; + gcx*) + # Green Hills C++ Compiler + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + + # The C++ compiler must be used to create the archive. 
+ _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs' + ;; + *) + # GNU C++ compiler with Solaris linker + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + _LT_AC_TAGVAR(no_undefined_flag, $1)=' ${wl}-z ${wl}defs' + if $CC --version | grep -v '^2\.7' > /dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd="$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep \"\-L\"" + else + # g++ 2.7 appears to require `-G' NOT `-shared' on this + # platform. + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd="$CC -G $CFLAGS -v conftest.$objext 2>&1 | grep \"\-L\"" + fi + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $wl$libdir' + fi + ;; + esac + ;; + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) + _LT_AC_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. + # For security reasons, it is highly recommended that you always + # use absolute paths for naming shared libraries, and exclude the + # DT_RUNPATH tag from executables and libraries. But doing so + # requires that you compile everything twice, which is a pain. + # So that behaviour is only enabled if SCOABSPATH is set to a + # non-empty value in the environment. Most likely only useful for + # creating official distributions of packages. 
+ # This is a hack until libtool officially supports absolute path + # names for shared libraries. + _LT_AC_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + vxworks*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; +esac +AC_MSG_RESULT([$_LT_AC_TAGVAR(ld_shlibs, $1)]) +test "$_LT_AC_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no + +_LT_AC_TAGVAR(GCC, $1)="$GXX" +_LT_AC_TAGVAR(LD, $1)="$LD" + +AC_LIBTOOL_POSTDEP_PREDEP($1) +AC_LIBTOOL_PROG_COMPILER_PIC($1) +AC_LIBTOOL_PROG_CC_C_O($1) +AC_LIBTOOL_SYS_HARD_LINK_LOCKS($1) +AC_LIBTOOL_PROG_LD_SHLIBS($1) +AC_LIBTOOL_SYS_DYNAMIC_LINKER($1) +AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH($1) + +AC_LIBTOOL_CONFIG($1) + +AC_LANG_POP +CC=$lt_save_CC +LDCXX=$LD +LD=$lt_save_LD +GCC=$lt_save_GCC +with_gnu_ldcxx=$with_gnu_ld +with_gnu_ld=$lt_save_with_gnu_ld +lt_cv_path_LDCXX=$lt_cv_path_LD +lt_cv_path_LD=$lt_save_path_LD +lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld +lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld +])# AC_LIBTOOL_LANG_CXX_CONFIG + +# AC_LIBTOOL_POSTDEP_PREDEP([TAGNAME]) +# ------------------------------------ +# Figure out "hidden" library dependencies from verbose +# compiler output when linking a shared library. +# Parse the compiler output and extract the necessary +# objects, libraries and library flags. +AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP],[ +dnl we can't use the lt_simple_compile_test_code here, +dnl because it contains code intended for an executable, +dnl not a library. It's possible we should let each +dnl tag define a new lt_????_link_test_code variable, +dnl but it's only used here... +ifelse([$1],[],[cat > conftest.$ac_ext < conftest.$ac_ext < conftest.$ac_ext < conftest.$ac_ext <> "$cfgfile" +ifelse([$1], [], +[#! $SHELL + +# `$echo "$cfgfile" | sed 's%^.*/%%'` - Provide generalized library-building support services. +# Generated automatically by $PROGRAM (GNU $PACKAGE $VERSION$TIMESTAMP) +# NOTE: Changes made to this file will be lost: look at ltmain.sh. +# +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001 +# Free Software Foundation, Inc. 
+# +# This file is part of GNU Libtool: +# Originally by Gordon Matzigkeit , 1996 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# A sed program that does not truncate output. +SED=$lt_SED + +# Sed that helps us avoid accidentally triggering echo(1) options like -n. +Xsed="$SED -e 1s/^X//" + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +# The names of the tagged configurations supported by this script. +available_tags= + +# ### BEGIN LIBTOOL CONFIG], +[# ### BEGIN LIBTOOL TAG CONFIG: $tagname]) + +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$_LT_AC_TAGVAR(archive_cmds_need_lc, $1) + +# Whether or not to disallow shared libs when runtime libs are static +allow_libtool_libs_with_static_runtimes=$_LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1) + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# An echo program that does not interpret backslashes. +echo=$lt_echo + +# The archiver. +AR=$lt_AR +AR_FLAGS=$lt_AR_FLAGS + +# A C compiler. +LTCC=$lt_LTCC + +# LTCC compiler flags. +LTCFLAGS=$lt_LTCFLAGS + +# A language-specific compiler. +CC=$lt_[]_LT_AC_TAGVAR(compiler, $1) + +# Is the compiler the GNU C compiler? +with_gcc=$_LT_AC_TAGVAR(GCC, $1) + +# An ERE matcher. +EGREP=$lt_EGREP + +# The linker used to build libraries. +LD=$lt_[]_LT_AC_TAGVAR(LD, $1) + +# Whether we need hard or soft links. +LN_S=$lt_LN_S + +# A BSD-compatible nm program. +NM=$lt_NM + +# A symbol stripping program +STRIP=$lt_STRIP + +# Used to examine libraries when file_magic_cmd begins "file" +MAGIC_CMD=$MAGIC_CMD + +# Used on cygwin: DLL creation program. +DLLTOOL="$DLLTOOL" + +# Used on cygwin: object dumper. +OBJDUMP="$OBJDUMP" + +# Used on cygwin: assembler. +AS="$AS" + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# How to pass a linker flag through the compiler. 
+wl=$lt_[]_LT_AC_TAGVAR(lt_prog_compiler_wl, $1) + +# Object file suffix (normally "o"). +objext="$ac_objext" + +# Old archive suffix (normally "a"). +libext="$libext" + +# Shared library suffix (normally ".so"). +shrext_cmds='$shrext_cmds' + +# Executable file suffix (normally ""). +exeext="$exeext" + +# Additional compiler flags for building library objects. +pic_flag=$lt_[]_LT_AC_TAGVAR(lt_prog_compiler_pic, $1) +pic_mode=$pic_mode + +# What is the maximum length of a command? +max_cmd_len=$lt_cv_sys_max_cmd_len + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_[]_LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1) + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Do we need the lib prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_[]_LT_AC_TAGVAR(lt_prog_compiler_static, $1) + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_[]_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_[]_LT_AC_TAGVAR(export_dynamic_flag_spec, $1) + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_[]_LT_AC_TAGVAR(whole_archive_flag_spec, $1) + +# Compiler flag to generate thread-safe objects. +thread_safe_flag_spec=$lt_[]_LT_AC_TAGVAR(thread_safe_flag_spec, $1) + +# Library versioning type. +version_type=$version_type + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME. +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Commands used to build and install an old-style archive. +RANLIB=$lt_RANLIB +old_archive_cmds=$lt_[]_LT_AC_TAGVAR(old_archive_cmds, $1) +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_[]_LT_AC_TAGVAR(old_archive_from_new_cmds, $1) + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_[]_LT_AC_TAGVAR(old_archive_from_expsyms_cmds, $1) + +# Commands used to build and install a shared archive. +archive_cmds=$lt_[]_LT_AC_TAGVAR(archive_cmds, $1) +archive_expsym_cmds=$lt_[]_LT_AC_TAGVAR(archive_expsym_cmds, $1) +postinstall_cmds=$lt_postinstall_cmds +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to build a loadable module (assumed same as above if empty) +module_cmds=$lt_[]_LT_AC_TAGVAR(module_cmds, $1) +module_expsym_cmds=$lt_[]_LT_AC_TAGVAR(module_expsym_cmds, $1) + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + +# Dependencies to place before the objects being linked to create a +# shared library. +predep_objects=$lt_[]_LT_AC_TAGVAR(predep_objects, $1) + +# Dependencies to place after the objects being linked to create a +# shared library. 
+postdep_objects=$lt_[]_LT_AC_TAGVAR(postdep_objects, $1) + +# Dependencies to place before the objects being linked to create a +# shared library. +predeps=$lt_[]_LT_AC_TAGVAR(predeps, $1) + +# Dependencies to place after the objects being linked to create a +# shared library. +postdeps=$lt_[]_LT_AC_TAGVAR(postdeps, $1) + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_[]_LT_AC_TAGVAR(compiler_lib_search_path, $1) + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method == file_magic. +file_magic_cmd=$lt_file_magic_cmd + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_[]_LT_AC_TAGVAR(allow_undefined_flag, $1) + +# Flag that forces no undefined symbols. +no_undefined_flag=$lt_[]_LT_AC_TAGVAR(no_undefined_flag, $1) + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# Same as above, but a single script fragment to be evaled but not shown. +finish_eval=$lt_finish_eval + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm in a C name address pair +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# This is the shared library runtime path variable. +runpath_var=$runpath_var + +# This is the shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# How to hardcode a shared library path into an executable. +hardcode_action=$_LT_AC_TAGVAR(hardcode_action, $1) + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist. +hardcode_libdir_flag_spec=$lt_[]_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1) + +# If ld is used when linking, flag to hardcode \$libdir into +# a binary during linking. This must work even if \$libdir does +# not exist. +hardcode_libdir_flag_spec_ld=$lt_[]_LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1) + +# Whether we need a single -rpath flag with a separated argument. +hardcode_libdir_separator=$lt_[]_LT_AC_TAGVAR(hardcode_libdir_separator, $1) + +# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the +# resulting binary. +hardcode_direct=$_LT_AC_TAGVAR(hardcode_direct, $1) + +# Set to yes if using the -LDIR flag during linking hardcodes DIR into the +# resulting binary. +hardcode_minus_L=$_LT_AC_TAGVAR(hardcode_minus_L, $1) + +# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into +# the resulting binary. +hardcode_shlibpath_var=$_LT_AC_TAGVAR(hardcode_shlibpath_var, $1) + +# Set to yes if building a shared library automatically hardcodes DIR into the library +# and all subsequent libraries and executables linked against it. +hardcode_automatic=$_LT_AC_TAGVAR(hardcode_automatic, $1) + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at relink time. 
+variables_saved_for_relink="$variables_saved_for_relink" + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$_LT_AC_TAGVAR(link_all_deplibs, $1) + +# Compile-time system search path for libraries +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Run-time system search path for libraries +sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + +# Fix the shell variable \$srcfile for the compiler. +fix_srcfile_path="$_LT_AC_TAGVAR(fix_srcfile_path, $1)" + +# Set to yes if exported symbols are required. +always_export_symbols=$_LT_AC_TAGVAR(always_export_symbols, $1) + +# The commands to list exported symbols. +export_symbols_cmds=$lt_[]_LT_AC_TAGVAR(export_symbols_cmds, $1) + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_[]_LT_AC_TAGVAR(exclude_expsyms, $1) + +# Symbols that must always be exported. +include_expsyms=$lt_[]_LT_AC_TAGVAR(include_expsyms, $1) + +ifelse([$1],[], +[# ### END LIBTOOL CONFIG], +[# ### END LIBTOOL TAG CONFIG: $tagname]) + +__EOF__ + +ifelse([$1],[], [ + case $host_os in + aix3*) + cat <<\EOF >> "$cfgfile" + +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +EOF + ;; + esac + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? + sed '$q' "$ltmain" >> "$cfgfile" || (rm -f "$cfgfile"; exit 1) + + mv -f "$cfgfile" "$ofile" || \ + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" +]) +else + # If there is no Makefile yet, we rely on a make rule to execute + # `config.status --recheck' to rerun these tests and create the + # libtool script then. + ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` + if test -f "$ltmain_in"; then + test -f Makefile && make "$ltmain" + fi +fi +])# AC_LIBTOOL_CONFIG + + +# AC_LIBTOOL_PROG_COMPILER_NO_RTTI([TAGNAME]) +# ------------------------------------------- +AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], +[AC_REQUIRE([_LT_AC_SYS_COMPILER])dnl + +_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= + +if test "$GCC" = yes; then + _LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' + + AC_LIBTOOL_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions], + lt_cv_prog_compiler_rtti_exceptions, + [-fno-rtti -fno-exceptions], [], + [_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"]) +fi +])# AC_LIBTOOL_PROG_COMPILER_NO_RTTI + + +# AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE +# --------------------------------- +AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], +[AC_REQUIRE([AC_CANONICAL_HOST]) +AC_REQUIRE([AC_PROG_NM]) +AC_REQUIRE([AC_OBJEXT]) +# Check for command to grab the raw symbol name followed by C symbol from nm. +AC_MSG_CHECKING([command to parse $NM output from $compiler object]) +AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], +[ +# These are sane defaults that work on at least a few old systems. +# [They come from Ultrix. What could be older than Ultrix?!! 
;)] + +# Character class describing NM global symbol codes. +symcode='[[BCDEGRST]]' + +# Regexp to match symbols that can be accessed directly from C. +sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)' + +# Transform an extracted symbol line into a proper C declaration +lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^. .* \(.*\)$/extern int \1;/p'" + +# Transform an extracted symbol line into symbol name and symbol address +lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" + +# Define system-specific variables. +case $host_os in +aix*) + symcode='[[BCDT]]' + ;; +cygwin* | mingw* | pw32*) + symcode='[[ABCDGISTW]]' + ;; +hpux*) # Its linker distinguishes data from code symbols + if test "$host_cpu" = ia64; then + symcode='[[ABCDEGRST]]' + fi + lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" + ;; +linux*) + if test "$host_cpu" = ia64; then + symcode='[[ABCDGIRSTW]]' + lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" + fi + ;; +irix* | nonstopux*) + symcode='[[BCDEGRST]]' + ;; +osf*) + symcode='[[BCDEGQRST]]' + ;; +solaris*) + symcode='[[BDRT]]' + ;; +sco3.2v5*) + symcode='[[DT]]' + ;; +sysv4.2uw2*) + symcode='[[DT]]' + ;; +sysv5* | sco5v6* | unixware* | OpenUNIX*) + symcode='[[ABDT]]' + ;; +sysv4) + symcode='[[DFNSTU]]' + ;; +esac + +# Handle CRLF in mingw tool chain +opt_cr= +case $build_os in +mingw*) + opt_cr=`echo 'x\{0,1\}' | tr x '\015'` # option cr in regexp + ;; +esac + +# If we're using GNU nm, then use its standard symbol codes. +case `$NM -V 2>&1` in +*GNU* | *'with BFD'*) + symcode='[[ABCDGIRSTW]]' ;; +esac + +# Try without a prefix undercore, then with it. +for ac_symprfx in "" "_"; do + + # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. + symxfrm="\\1 $ac_symprfx\\2 \\2" + + # Write the raw and C identifiers. + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + + # Check to see that the pipe works correctly. + pipe_works=no + + rm -f conftest* + cat > conftest.$ac_ext < $nlist) && test -s "$nlist"; then + # Try sorting and uniquifying the output. + if sort "$nlist" | uniq > "$nlist"T; then + mv -f "$nlist"T "$nlist" + else + rm -f "$nlist"T + fi + + # Make sure that we snagged all the symbols we need. + if grep ' nm_test_var$' "$nlist" >/dev/null; then + if grep ' nm_test_func$' "$nlist" >/dev/null; then + cat < conftest.$ac_ext +#ifdef __cplusplus +extern "C" { +#endif + +EOF + # Now generate the symbol file. + eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | grep -v main >> conftest.$ac_ext' + + cat <> conftest.$ac_ext +#if defined (__STDC__) && __STDC__ +# define lt_ptr_t void * +#else +# define lt_ptr_t char * +# define const +#endif + +/* The mapping between symbol names and symbols. 
*/ +const struct { + const char *name; + lt_ptr_t address; +} +lt_preloaded_symbols[[]] = +{ +EOF + $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (lt_ptr_t) \&\2},/" < "$nlist" | grep -v main >> conftest.$ac_ext + cat <<\EOF >> conftest.$ac_ext + {0, (lt_ptr_t) 0} +}; + +#ifdef __cplusplus +} +#endif +EOF + # Now try linking the two files. + mv conftest.$ac_objext conftstm.$ac_objext + lt_save_LIBS="$LIBS" + lt_save_CFLAGS="$CFLAGS" + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)" + if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext}; then + pipe_works=yes + fi + LIBS="$lt_save_LIBS" + CFLAGS="$lt_save_CFLAGS" + else + echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD + fi + else + echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD + fi + else + echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD + fi + else + echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD + cat conftest.$ac_ext >&5 + fi + rm -f conftest* conftst* + + # Do not use the global_symbol_pipe unless it works. + if test "$pipe_works" = yes; then + break + else + lt_cv_sys_global_symbol_pipe= + fi +done +]) +if test -z "$lt_cv_sys_global_symbol_pipe"; then + lt_cv_sys_global_symbol_to_cdecl= +fi +if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then + AC_MSG_RESULT(failed) +else + AC_MSG_RESULT(ok) +fi +]) # AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE + + +# AC_LIBTOOL_PROG_COMPILER_PIC([TAGNAME]) +# --------------------------------------- +AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC], +[_LT_AC_TAGVAR(lt_prog_compiler_wl, $1)= +_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= +_LT_AC_TAGVAR(lt_prog_compiler_static, $1)= + +AC_MSG_CHECKING([for $compiler option to produce PIC]) + ifelse([$1],[CXX],[ + # C++ specific cases for pic, static, wl, etc. + if test "$GXX" = yes; then + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + amigaos*) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' + ;; + beos* | cygwin* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + mingw* | os2* | pw32*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT' + ;; + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' + ;; + *djgpp*) + # DJGPP does not support shared libraries at all + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= + ;; + interix3*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + sysv4*MP*) + if test -d /usr/nec; then + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic + fi + ;; + hpux*) + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. 
+ case $host_cpu in + hppa*64*|ia64*) + ;; + *) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + ;; + *) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + else + case $host_os in + aix4* | aix5*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + else + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' + fi + ;; + chorus*) + case $cc_basename in + cxch68*) + # Green Hills C++ Compiler + # _LT_AC_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" + ;; + esac + ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case $cc_basename in + xlc*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-qnocommon' + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + ;; + esac + ;; + dgux*) + case $cc_basename in + ec++*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + ;; + ghcx*) + # Green Hills C++ Compiler + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + *) + ;; + esac + ;; + freebsd* | kfreebsd*-gnu | dragonfly*) + # FreeBSD uses GNU C++ + ;; + hpux9* | hpux10* | hpux11*) + case $cc_basename in + CC*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + if test "$host_cpu" != ia64; then + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + fi + ;; + aCC*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + ;; + esac + ;; + *) + ;; + esac + ;; + interix*) + # This is c89, which is MS Visual C++ (no shared libs) + # Anyone wants to do a port? + ;; + irix5* | irix6* | nonstopux*) + case $cc_basename in + CC*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + # CC pic flag -KPIC is the default. + ;; + *) + ;; + esac + ;; + linux*) + case $cc_basename in + KCC*) + # KAI C++ Compiler + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + icpc* | ecpc*) + # Intel C++ + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + pgCC*) + # Portland Group C++ compiler. + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + cxx*) + # Compaq C++ + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + *) + ;; + esac + ;; + lynxos*) + ;; + m88k*) + ;; + mvs*) + case $cc_basename in + cxx*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall' + ;; + *) + ;; + esac + ;; + netbsd*) + ;; + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' + ;; + RCC*) + # Rational C++ 2.4.1 + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + cxx*) + # Digital/Compaq C++ + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # Make sure the PIC flag is empty. 
It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + *) + ;; + esac + ;; + psos*) + ;; + solaris*) + case $cc_basename in + CC*) + # Sun C++ 4.2, 5.x and Centerline C++ + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + ;; + gcx*) + # Green Hills C++ Compiler + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + ;; + *) + ;; + esac + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + lcc*) + # Lucid + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + *) + ;; + esac + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + ;; + *) + ;; + esac + ;; + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + case $cc_basename in + CC*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + esac + ;; + vxworks*) + ;; + *) + _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + esac + fi +], +[ + if test "$GCC" = yes; then + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + + amigaos*) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' + ;; + + beos* | cygwin* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT' + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' + ;; + + interix3*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + enable_shared=no + ;; + + sysv4*MP*) + if test -d /usr/nec; then + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic + fi + ;; + + hpux*) + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + ;; + + *) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. 
+ case $host_os in + aix*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + else + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' + fi + ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case $cc_basename in + xlc*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-qnocommon' + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + ;; + esac + ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT' + ;; + + hpux9* | hpux10* | hpux11*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # PIC (with -KPIC) is the default. + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + newsos6) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + linux*) + case $cc_basename in + icc* | ecc*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + pgcc* | pgf77* | pgf90* | pgf95*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + ccc*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # All Alpha code is PIC. + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + esac + ;; + + osf3* | osf4* | osf5*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # All OSF/1 code is PIC. 
+ _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + solaris*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + case $cc_basename in + f77* | f90* | f95*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; + *) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; + esac + ;; + + sunos4*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec ;then + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + unicos*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + + uts4*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + *) + _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + esac + fi +]) +AC_MSG_RESULT([$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)]) + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)"; then + AC_LIBTOOL_COMPILER_OPTION([if $compiler PIC flag $_LT_AC_TAGVAR(lt_prog_compiler_pic, $1) works], + _LT_AC_TAGVAR(lt_prog_compiler_pic_works, $1), + [$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)ifelse([$1],[],[ -DPIC],[ifelse([$1],[CXX],[ -DPIC],[])])], [], + [case $_LT_AC_TAGVAR(lt_prog_compiler_pic, $1) in + "" | " "*) ;; + *) _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)" ;; + esac], + [_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no]) +fi +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= + ;; + *) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)ifelse([$1],[],[ -DPIC],[ifelse([$1],[CXX],[ -DPIC],[])])" + ;; +esac + +# +# Check to make sure the static flag actually works. +# +wl=$_LT_AC_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_AC_TAGVAR(lt_prog_compiler_static, $1)\" +AC_LIBTOOL_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works], + _LT_AC_TAGVAR(lt_prog_compiler_static_works, $1), + $lt_tmp_static_flag, + [], + [_LT_AC_TAGVAR(lt_prog_compiler_static, $1)=]) +]) + + +# AC_LIBTOOL_PROG_LD_SHLIBS([TAGNAME]) +# ------------------------------------ +# See if the linker supports building shared libraries. +AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS], +[AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) +ifelse([$1],[CXX],[ + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + case $host_os in + aix4* | aix5*) + # If we're using GNU nm, then we don't want the "-C" option. 
+ # -C means demangle to AIX nm, but means don't demangle with GNU nm + if $NM -V 2>&1 | grep 'GNU' > /dev/null; then + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\[$]2 == "T") || (\[$]2 == "D") || (\[$]2 == "B")) && ([substr](\[$]3,1,1) != ".")) { print \[$]3 } }'\'' | sort -u > $export_symbols' + else + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\[$]2 == "T") || (\[$]2 == "D") || (\[$]2 == "B")) && ([substr](\[$]3,1,1) != ".")) { print \[$]3 } }'\'' | sort -u > $export_symbols' + fi + ;; + pw32*) + _LT_AC_TAGVAR(export_symbols_cmds, $1)="$ltdll_cmds" + ;; + cygwin* | mingw*) + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]] /s/.* \([[^ ]]*\)/\1 DATA/;/^.* __nm__/s/^.* __nm__\([[^ ]]*\) [[^ ]]*/\1 DATA/;/^I /d;/^[[AITW]] /s/.* //'\'' | sort | uniq > $export_symbols' + ;; + *) + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + ;; + esac +],[ + runpath_var= + _LT_AC_TAGVAR(allow_undefined_flag, $1)= + _LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=no + _LT_AC_TAGVAR(archive_cmds, $1)= + _LT_AC_TAGVAR(archive_expsym_cmds, $1)= + _LT_AC_TAGVAR(old_archive_From_new_cmds, $1)= + _LT_AC_TAGVAR(old_archive_from_expsyms_cmds, $1)= + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)= + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)= + _LT_AC_TAGVAR(thread_safe_flag_spec, $1)= + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)= + _LT_AC_TAGVAR(hardcode_direct, $1)=no + _LT_AC_TAGVAR(hardcode_minus_L, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + _LT_AC_TAGVAR(link_all_deplibs, $1)=unknown + _LT_AC_TAGVAR(hardcode_automatic, $1)=no + _LT_AC_TAGVAR(module_cmds, $1)= + _LT_AC_TAGVAR(module_expsym_cmds, $1)= + _LT_AC_TAGVAR(always_export_symbols, $1)=no + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + _LT_AC_TAGVAR(include_expsyms, $1)= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ` (' and `)$', so one must not match beginning or + # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', + # as well as any symbol that contains `d'. + _LT_AC_TAGVAR(exclude_expsyms, $1)="_GLOBAL_OFFSET_TABLE_" + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + extract_expsyms_cmds= + # Just being paranoid about ensuring that cc_basename is set. + _LT_CC_BASENAME([$compiler]) + case $host_os in + cygwin* | mingw* | pw32*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. 
+ if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; + openbsd*) + with_gnu_ld=no + ;; + esac + + _LT_AC_TAGVAR(ld_shlibs, $1)=yes + if test "$with_gnu_ld" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='${wl}' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. + if $LD --help 2>&1 | grep 'no-whole-archive' > /dev/null; then + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)= + fi + supports_anon_versioning=no + case `$LD -v 2>/dev/null` in + *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix3* | aix4* | aix5*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + _LT_AC_TAGVAR(ld_shlibs, $1)=no + cat <&2 + +*** Warning: the GNU linker, at least up to release 2.9.1, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to modify your PATH +*** so that a non-GNU linker is found, and then restart. + +EOF + fi + ;; + + amigaos*) + _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + + # Samuel A. Falvo II reports + # that the semantics of dynamic libraries on AmigaOS, at least up + # to version 4, is to share data among multiple programs linked + # with the same dynamic library. Since this doesn't match the + # behavior of shared libraries on other platforms, we can't use + # them. + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + + beos*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + cygwin* | mingw* | pw32*) + # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, + # as there is no search path for DLLs. 
+ _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_AC_TAGVAR(always_export_symbols, $1)=no + _LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]] /s/.* \([[^ ]]*\)/\1 DATA/'\'' | $SED -e '\''/^[[AITW]] /s/.* //'\'' | sort | uniq > $export_symbols' + + if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + interix3*) + _LT_AC_TAGVAR(hardcode_direct, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
+ _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + + linux*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + tmp_addflag= + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + esac + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + + if test $supports_anon_versioning = yes; then + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + $echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + else + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris*) + if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then + _LT_AC_TAGVAR(ld_shlibs, $1)=no + cat <&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. 
+ +EOF + elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*) + _LT_AC_TAGVAR(ld_shlibs, $1)=no + cat <<_LT_EOF 1>&2 + +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not +*** reliably create shared libraries on SCO systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + ;; + *) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='`test -z "$SCOABSPATH" && echo ${wl}-rpath,$libdir`' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname,-retain-symbols-file,$export_symbols -o $lib' + else + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + sunos4*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + + if test "$_LT_AC_TAGVAR(ld_shlibs, $1)" = no; then + runpath_var= + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)= + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_AC_TAGVAR(always_export_symbols, $1)=yes + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + _LT_AC_TAGVAR(hardcode_direct, $1)=unsupported + fi + ;; + + aix4* | aix5*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. 
+ # -C means demangle to AIX nm, but means don't demangle with GNU nm + if $NM -V 2>&1 | grep 'GNU' > /dev/null; then + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\[$]2 == "T") || (\[$]2 == "D") || (\[$]2 == "B")) && ([substr](\[$]3,1,1) != ".")) { print \[$]3 } }'\'' | sort -u > $export_symbols' + else + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\[$]2 == "T") || (\[$]2 == "D") || (\[$]2 == "B")) && ([substr](\[$]3,1,1) != ".")) { print \[$]3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[[23]]|aix4.[[23]].*|aix5*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + _LT_AC_TAGVAR(archive_cmds, $1)='' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + + if test "$GCC" = yes; then + case $host_os in aix4.[[012]]|aix4.[[012]].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && \ + strings "$collect2name" | grep resolve_lib_name >/dev/null + then + # We have reworked collect2 + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + else + # We have old collect2 + _LT_AC_TAGVAR(hardcode_direct, $1)=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)= + fi + ;; + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + _LT_AC_TAGVAR(always_export_symbols, $1)=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + _LT_AC_TAGVAR(allow_undefined_flag, $1)='-berok' + # Determine the default libpath from the value encoded in an empty executable. 
+ _LT_AC_SYS_LIBPATH_AIX + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' + _LT_AC_TAGVAR(allow_undefined_flag, $1)="-z nodefs" + _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an empty executable. + _LT_AC_SYS_LIBPATH_AIX + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + _LT_AC_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' + # Exported symbols can be pulled into shared objects from archives + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='$convenience' + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=yes + # This is similar to how AIX traditionally builds its shared libraries. + _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + amigaos*) + _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + # see comment about different semantics on the GNU ld section + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + + bsdi[[45]]*) + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic + ;; + + cygwin* | mingw* | pw32*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' + _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | $SED -e '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + _LT_AC_TAGVAR(old_archive_From_new_cmds, $1)='true' + # FIXME: Should let the user specify the lib program. 
+ _LT_AC_TAGVAR(old_archive_cmds, $1)='lib /OUT:$oldlib$oldobjs$old_deplibs' + _LT_AC_TAGVAR(fix_srcfile_path, $1)='`cygpath -w "$srcfile"`' + _LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + ;; + + darwin* | rhapsody*) + case $host_os in + rhapsody* | darwin1.[[012]]) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[[012]]) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; + esac + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_AC_TAGVAR(hardcode_direct, $1)=no + _LT_AC_TAGVAR(hardcode_automatic, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='' + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + if test "$GCC" = yes ; then + output_verbose_link_cmd='echo' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + case $cc_basename in + xlc*) + output_verbose_link_cmd='echo' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + fi + ;; + + dgux*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + freebsd1*) + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + + # 
FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). + freebsd2.2*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | kfreebsd*-gnu | dragonfly*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + hpux9*) + if test "$GCC" = yes; then + _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + ;; + + hpux10*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test "$with_gnu_ld" = no; then + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + fi + ;; + + hpux11*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + case $host_cpu in + hppa*64*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + fi + if test "$with_gnu_ld" = no; then + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + case $host_cpu in + hppa*64*|ia64*) + _LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='+b $libdir' + _LT_AC_TAGVAR(hardcode_direct, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + *) + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='-rpath $libdir' + fi + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + newsos6) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + openbsd*) + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs 
$deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + else + case $host_os in + openbsd[[01]].* | openbsd2.[[0-7]] | openbsd2.[[0-7]].*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + ;; + *) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + ;; + esac + fi + ;; + + os2*) + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_AC_TAGVAR(archive_cmds, $1)='$echo "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$echo DATA >> $output_objdir/$libname.def~$echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~$echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' + _LT_AC_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; + + osf3*) + if test "$GCC" = yes; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + fi + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + else + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; echo "-hidden">> $lib.exp~ + $LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~$rm $lib.exp' + + # Both c and cxx compiler support -rpath directly + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + fi + _LT_AC_TAGVAR(hardcode_libdir_separator, 
$1)=: + ;; + + solaris*) + _LT_AC_TAGVAR(no_undefined_flag, $1)=' -z text' + if test "$GCC" = yes; then + wlarc='${wl}' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -shared ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$rm $lib.exp' + else + wlarc='' + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' + fi + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + # The compiler driver will combine linker options so we + # cannot just pass the convience library names through + # without $wl, iff we do not link with $LD. + # Luckily, gcc supports the same syntax we need for Sun Studio. + # Supported since Solaris 2.6 (maybe 2.5.1?) + case $wlarc in + '') + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' ;; + *) + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}-z ${wl}defaultextract' ;; + esac ;; + esac + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + ;; + + sunos4*) + if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + sysv4) + case $host_vendor in + sni) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. 
+ _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs' + _LT_AC_TAGVAR(hardcode_direct, $1)=no + ;; + motorola) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + sysv4.3*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + _LT_AC_TAGVAR(ld_shlibs, $1)=yes + fi + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7*) + _LT_AC_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+ _LT_AC_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + uts4*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *) + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + fi +]) +AC_MSG_RESULT([$_LT_AC_TAGVAR(ld_shlibs, $1)]) +test "$_LT_AC_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no + +# +# Do we need to explicitly link libc? +# +case "x$_LT_AC_TAGVAR(archive_cmds_need_lc, $1)" in +x|xyes) + # Assume -lc should be added + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $_LT_AC_TAGVAR(archive_cmds, $1) in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + AC_MSG_CHECKING([whether -lc should be explicitly linked in]) + $rm conftest* + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + if AC_TRY_EVAL(ac_compile) 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$_LT_AC_TAGVAR(lt_prog_compiler_wl, $1) + pic_flag=$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1) + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$_LT_AC_TAGVAR(allow_undefined_flag, $1) + _LT_AC_TAGVAR(allow_undefined_flag, $1)= + if AC_TRY_EVAL(_LT_AC_TAGVAR(archive_cmds, $1) 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1) + then + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + else + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=yes + fi + _LT_AC_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $rm conftest* + AC_MSG_RESULT([$_LT_AC_TAGVAR(archive_cmds_need_lc, $1)]) + ;; + esac + fi + ;; +esac +])# AC_LIBTOOL_PROG_LD_SHLIBS + + +# _LT_AC_FILE_LTDLL_C +# ------------------- +# Be careful that the start marker always follows a newline. 
+AC_DEFUN([_LT_AC_FILE_LTDLL_C], [ +# /* ltdll.c starts here */ +# #define WIN32_LEAN_AND_MEAN +# #include +# #undef WIN32_LEAN_AND_MEAN +# #include +# +# #ifndef __CYGWIN__ +# # ifdef __CYGWIN32__ +# # define __CYGWIN__ __CYGWIN32__ +# # endif +# #endif +# +# #ifdef __cplusplus +# extern "C" { +# #endif +# BOOL APIENTRY DllMain (HINSTANCE hInst, DWORD reason, LPVOID reserved); +# #ifdef __cplusplus +# } +# #endif +# +# #ifdef __CYGWIN__ +# #include +# DECLARE_CYGWIN_DLL( DllMain ); +# #endif +# HINSTANCE __hDllInstance_base; +# +# BOOL APIENTRY +# DllMain (HINSTANCE hInst, DWORD reason, LPVOID reserved) +# { +# __hDllInstance_base = hInst; +# return TRUE; +# } +# /* ltdll.c ends here */ +])# _LT_AC_FILE_LTDLL_C + + +# _LT_AC_TAGVAR(VARNAME, [TAGNAME]) +# --------------------------------- +AC_DEFUN([_LT_AC_TAGVAR], [ifelse([$2], [], [$1], [$1_$2])]) + + +# old names +AC_DEFUN([AM_PROG_LIBTOOL], [AC_PROG_LIBTOOL]) +AC_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)]) +AC_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)]) +AC_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)]) +AC_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)]) +AC_DEFUN([AM_PROG_LD], [AC_PROG_LD]) +AC_DEFUN([AM_PROG_NM], [AC_PROG_NM]) + +# This is just to silence aclocal about the macro not being used +ifelse([AC_DISABLE_FAST_INSTALL]) + +AC_DEFUN([LT_AC_PROG_GCJ], +[AC_CHECK_TOOL(GCJ, gcj, no) + test "x${GCJFLAGS+set}" = xset || GCJFLAGS="-g -O2" + AC_SUBST(GCJFLAGS) +]) + +AC_DEFUN([LT_AC_PROG_RC], +[AC_CHECK_TOOL(RC, windres, no) +]) + +# NOTE: This macro has been submitted for inclusion into # +# GNU Autoconf as AC_PROG_SED. When it is available in # +# a released version of Autoconf we should remove this # +# macro and use it instead. # +# LT_AC_PROG_SED +# -------------- +# Check for a fully-functional sed program, that truncates +# as few characters as possible. Prefer GNU sed if found. +AC_DEFUN([LT_AC_PROG_SED], +[AC_MSG_CHECKING([for a sed that does not truncate output]) +AC_CACHE_VAL(lt_cv_path_SED, +[# Loop through the user's path and test for sed and gsed. +# Then use that list of sed's as ones to test for truncation. +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for lt_ac_prog in sed gsed; do + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then + lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" + fi + done + done +done +lt_ac_max=0 +lt_ac_count=0 +# Add /usr/xpg4/bin/sed as it is typically found on Solaris +# along with /bin/sed that truncates output. +for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do + test ! -f $lt_ac_sed && continue + cat /dev/null > conftest.in + lt_ac_count=0 + echo $ECHO_N "0123456789$ECHO_C" >conftest.in + # Check for GNU sed and select it if it is found. 
+ if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then + lt_cv_path_SED=$lt_ac_sed + break + fi + while true; do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo >>conftest.nl + $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break + cmp -s conftest.out conftest.nl || break + # 10000 chars as input seems more than enough + test $lt_ac_count -gt 10 && break + lt_ac_count=`expr $lt_ac_count + 1` + if test $lt_ac_count -gt $lt_ac_max; then + lt_ac_max=$lt_ac_count + lt_cv_path_SED=$lt_ac_sed + fi + done +done +]) +SED=$lt_cv_path_SED +AC_MSG_RESULT([$SED]) +]) + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/addopcodes.awk b/libraries/sqlite/unix/sqlite-3.5.1/addopcodes.awk new file mode 100644 index 0000000000..b806b1d96c --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/addopcodes.awk @@ -0,0 +1,32 @@ +#!/usr/bin/awk +# +# This script appends additional token codes to the end of the +# parse.h file that lemon generates. These extra token codes are +# not used by the parser. But they are used by the tokenizer and/or +# the code generator. +# +# +BEGIN { + max = 0 +} +/^#define TK_/ { + print $0 + if( max<$3 ) max = $3 +} +END { + printf "#define TK_%-29s %4d\n", "TO_TEXT", max+1 + printf "#define TK_%-29s %4d\n", "TO_BLOB", max+2 + printf "#define TK_%-29s %4d\n", "TO_NUMERIC", max+3 + printf "#define TK_%-29s %4d\n", "TO_INT", max+4 + printf "#define TK_%-29s %4d\n", "TO_REAL", max+5 + printf "#define TK_%-29s %4d\n", "END_OF_FILE", max+6 + printf "#define TK_%-29s %4d\n", "ILLEGAL", max+7 + printf "#define TK_%-29s %4d\n", "SPACE", max+8 + printf "#define TK_%-29s %4d\n", "UNCLOSED_STRING", max+9 + printf "#define TK_%-29s %4d\n", "COMMENT", max+10 + printf "#define TK_%-29s %4d\n", "FUNCTION", max+11 + printf "#define TK_%-29s %4d\n", "COLUMN", max+12 + printf "#define TK_%-29s %4d\n", "AGG_FUNCTION", max+13 + printf "#define TK_%-29s %4d\n", "AGG_COLUMN", max+14 + printf "#define TK_%-29s %4d\n", "CONST_FUNC", max+15 +} diff --git a/libraries/sqlite/unix/sqlite-3.5.1/art/2005osaward.gif b/libraries/sqlite/unix/sqlite-3.5.1/art/2005osaward.gif new file mode 100644 index 0000000000..fa6d7d7c27 Binary files /dev/null and b/libraries/sqlite/unix/sqlite-3.5.1/art/2005osaward.gif differ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/art/SQLite.eps b/libraries/sqlite/unix/sqlite-3.5.1/art/SQLite.eps new file mode 100644 index 0000000000..1f334ecf7b Binary files /dev/null and b/libraries/sqlite/unix/sqlite-3.5.1/art/SQLite.eps differ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/art/SQLite.gif b/libraries/sqlite/unix/sqlite-3.5.1/art/SQLite.gif new file mode 100644 index 0000000000..5ec05b0be2 Binary files /dev/null and b/libraries/sqlite/unix/sqlite-3.5.1/art/SQLite.gif differ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/art/SQLiteLogo3.tiff b/libraries/sqlite/unix/sqlite-3.5.1/art/SQLiteLogo3.tiff new file mode 100644 index 0000000000..70b88e7805 Binary files /dev/null and b/libraries/sqlite/unix/sqlite-3.5.1/art/SQLiteLogo3.tiff differ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/art/SQLite_big.gif b/libraries/sqlite/unix/sqlite-3.5.1/art/SQLite_big.gif new file mode 100644 index 0000000000..dc9e6a0e6c Binary files /dev/null and b/libraries/sqlite/unix/sqlite-3.5.1/art/SQLite_big.gif differ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/art/nocopy.gif b/libraries/sqlite/unix/sqlite-3.5.1/art/nocopy.gif new file mode 100644 index 0000000000..cc4a59c4c0 Binary files /dev/null and 
b/libraries/sqlite/unix/sqlite-3.5.1/art/nocopy.gif differ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/art/powered_by_sqlite.gif b/libraries/sqlite/unix/sqlite-3.5.1/art/powered_by_sqlite.gif new file mode 100644 index 0000000000..5bfed023ee Binary files /dev/null and b/libraries/sqlite/unix/sqlite-3.5.1/art/powered_by_sqlite.gif differ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/config.guess b/libraries/sqlite/unix/sqlite-3.5.1/config.guess new file mode 100644 index 0000000000..6960a397af --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/config.guess @@ -0,0 +1,1532 @@ +#! /bin/sh +# Attempt to guess a canonical system name. +# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, +# 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, +# Inc. + +timestamp='2007-01-15' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA +# 02110-1301, USA. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + + +# Originally written by Per Bothner . +# Please send patches to . Submit a context +# diff and a properly formatted ChangeLog entry. +# +# This script attempts to guess a canonical system name similar to +# config.sub. If it succeeds, it prints the system name on stdout, and +# exits with 0. Otherwise, it exits with 1. +# +# The plan is that this can be called by configure scripts if you +# don't specify an explicit build system type. + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] + +Output the configuration name of the system \`$me' is run on. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.guess ($timestamp) + +Originally written by Per Bothner. +Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 +Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. 
+ break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + * ) + break ;; + esac +done + +if test $# != 0; then + echo "$me: too many arguments$help" >&2 + exit 1 +fi + +trap 'exit 1' 1 2 15 + +# CC_FOR_BUILD -- compiler used by this script. Note that the use of a +# compiler to aid in system detection is discouraged as it requires +# temporary files to be created and, as you can see below, it is a +# headache to deal with in a portable fashion. + +# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still +# use `HOST_CC' if defined, but it is deprecated. + +# Portable tmp directory creation inspired by the Autoconf team. + +set_cc_for_build=' +trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; +trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; +: ${TMPDIR=/tmp} ; + { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || + { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || + { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || + { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; +dummy=$tmp/dummy ; +tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; +case $CC_FOR_BUILD,$HOST_CC,$CC in + ,,) echo "int x;" > $dummy.c ; + for c in cc gcc c89 c99 ; do + if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then + CC_FOR_BUILD="$c"; break ; + fi ; + done ; + if test x"$CC_FOR_BUILD" = x ; then + CC_FOR_BUILD=no_compiler_found ; + fi + ;; + ,,*) CC_FOR_BUILD=$CC ;; + ,*,*) CC_FOR_BUILD=$HOST_CC ;; +esac ; set_cc_for_build= ;' + +# This is needed to find uname on a Pyramid OSx when run in the BSD universe. +# (ghazi@noc.rutgers.edu 1994-08-24) +if (test -f /.attbin/uname) >/dev/null 2>&1 ; then + PATH=$PATH:/.attbin ; export PATH +fi + +UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown +UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown +UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown +UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown + +if [ "${UNAME_SYSTEM}" = "Linux" ] ; then + eval $set_cc_for_build + cat << EOF > $dummy.c + #include + #ifdef __UCLIBC__ + # ifdef __UCLIBC_CONFIG_VERSION__ + LIBC=uclibc __UCLIBC_CONFIG_VERSION__ + # else + LIBC=uclibc + # endif + #else + LIBC=gnu + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep LIBC= | sed -e 's: ::g'` +fi + +# Note: order is significant - the case branches are not exclusive. + +case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in + *:NetBSD:*:*) + # NetBSD (nbsd) targets should (where applicable) match one or + # more of the tupples: *-*-netbsdelf*, *-*-netbsdaout*, + # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently + # switched to ELF, *-*-netbsd* would select the old + # object file format. This provides both forward + # compatibility and a consistent mechanism for selecting the + # object file format. + # + # Note: NetBSD doesn't particularly care about the vendor + # portion of the name. We always set it to "unknown". 
+ sysctl="sysctl -n hw.machine_arch" + UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ + /usr/sbin/$sysctl 2>/dev/null || echo unknown)` + case "${UNAME_MACHINE_ARCH}" in + armeb) machine=armeb-unknown ;; + arm*) machine=arm-unknown ;; + sh3el) machine=shl-unknown ;; + sh3eb) machine=sh-unknown ;; + sh5el) machine=sh5le-unknown ;; + *) machine=${UNAME_MACHINE_ARCH}-unknown ;; + esac + # The Operating System including object format, if it has switched + # to ELF recently, or will in the future. + case "${UNAME_MACHINE_ARCH}" in + arm*|i386|m68k|ns32k|sh3*|sparc|vax) + eval $set_cc_for_build + if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep __ELF__ >/dev/null + then + # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). + # Return netbsd for either. FIX? + os=netbsd + else + os=netbsdelf + fi + ;; + *) + os=netbsd + ;; + esac + # The OS release + # Debian GNU/NetBSD machines have a different userland, and + # thus, need a distinct triplet. However, they do not need + # kernel version information, so it can be replaced with a + # suitable tag, in the style of linux-gnu. + case "${UNAME_VERSION}" in + Debian*) + release='-gnu' + ;; + *) + release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` + ;; + esac + # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: + # contains redundant information, the shorter form: + # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. + echo "${machine}-${os}${release}" + exit ;; + *:OpenBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} + exit ;; + *:ekkoBSD:*:*) + echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} + exit ;; + *:SolidBSD:*:*) + echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} + exit ;; + macppc:MirBSD:*:*) + echo powerpc-unknown-mirbsd${UNAME_RELEASE} + exit ;; + *:MirBSD:*:*) + echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} + exit ;; + alpha:OSF1:*:*) + case $UNAME_RELEASE in + *4.0) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` + ;; + *5.*) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` + ;; + esac + # According to Compaq, /usr/sbin/psrinfo has been available on + # OSF/1 and Tru64 systems produced since 1995. I hope that + # covers most systems running today. This code pipes the CPU + # types through head -n 1, so we only detect the type of CPU 0. + ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` + case "$ALPHA_CPU_TYPE" in + "EV4 (21064)") + UNAME_MACHINE="alpha" ;; + "EV4.5 (21064)") + UNAME_MACHINE="alpha" ;; + "LCA4 (21066/21068)") + UNAME_MACHINE="alpha" ;; + "EV5 (21164)") + UNAME_MACHINE="alphaev5" ;; + "EV5.6 (21164A)") + UNAME_MACHINE="alphaev56" ;; + "EV5.6 (21164PC)") + UNAME_MACHINE="alphapca56" ;; + "EV5.7 (21164PC)") + UNAME_MACHINE="alphapca57" ;; + "EV6 (21264)") + UNAME_MACHINE="alphaev6" ;; + "EV6.7 (21264A)") + UNAME_MACHINE="alphaev67" ;; + "EV6.8CB (21264C)") + UNAME_MACHINE="alphaev68" ;; + "EV6.8AL (21264B)") + UNAME_MACHINE="alphaev68" ;; + "EV6.8CX (21264D)") + UNAME_MACHINE="alphaev68" ;; + "EV6.9A (21264/EV69A)") + UNAME_MACHINE="alphaev69" ;; + "EV7 (21364)") + UNAME_MACHINE="alphaev7" ;; + "EV7.9 (21364A)") + UNAME_MACHINE="alphaev79" ;; + esac + # A Pn.n version is a patched version. + # A Vn.n version is a released version. + # A Tn.n version is a released field test version. + # A Xn.n version is an unreleased experimental baselevel. + # 1.2 uses "1.2" for uname -r. 
+ echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` + exit ;; + Alpha\ *:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # Should we change UNAME_MACHINE based on the output of uname instead + # of the specific Alpha model? + echo alpha-pc-interix + exit ;; + 21064:Windows_NT:50:3) + echo alpha-dec-winnt3.5 + exit ;; + Amiga*:UNIX_System_V:4.0:*) + echo m68k-unknown-sysv4 + exit ;; + *:[Aa]miga[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-amigaos + exit ;; + *:[Mm]orph[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-morphos + exit ;; + *:OS/390:*:*) + echo i370-ibm-openedition + exit ;; + *:z/VM:*:*) + echo s390-ibm-zvmoe + exit ;; + *:OS400:*:*) + echo powerpc-ibm-os400 + exit ;; + arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) + echo arm-acorn-riscix${UNAME_RELEASE} + exit ;; + arm:riscos:*:*|arm:RISCOS:*:*) + echo arm-unknown-riscos + exit ;; + SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) + echo hppa1.1-hitachi-hiuxmpp + exit ;; + Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) + # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. + if test "`(/bin/universe) 2>/dev/null`" = att ; then + echo pyramid-pyramid-sysv3 + else + echo pyramid-pyramid-bsd + fi + exit ;; + NILE*:*:*:dcosx) + echo pyramid-pyramid-svr4 + exit ;; + DRS?6000:unix:4.0:6*) + echo sparc-icl-nx6 + exit ;; + DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) + case `/usr/bin/uname -p` in + sparc) echo sparc-icl-nx7; exit ;; + esac ;; + sun4H:SunOS:5.*:*) + echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) + echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + i86pc:SunOS:5.*:*) + echo i386-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:6*:*) + # According to config.sub, this is the proper way to canonicalize + # SunOS6. Hard to guess exactly what SunOS6 will be like, but + # it's likely to be more like Solaris than SunOS4. + echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:*:*) + case "`/usr/bin/arch -k`" in + Series*|S4*) + UNAME_RELEASE=`uname -v` + ;; + esac + # Japanese Language versions have a version number like `4.1.3-JL'. + echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` + exit ;; + sun3*:SunOS:*:*) + echo m68k-sun-sunos${UNAME_RELEASE} + exit ;; + sun*:*:4.2BSD:*) + UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` + test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 + case "`/bin/arch`" in + sun3) + echo m68k-sun-sunos${UNAME_RELEASE} + ;; + sun4) + echo sparc-sun-sunos${UNAME_RELEASE} + ;; + esac + exit ;; + aushp:SunOS:*:*) + echo sparc-auspex-sunos${UNAME_RELEASE} + exit ;; + # The situation for MiNT is a little confusing. The machine name + # can be virtually everything (everything which is not + # "atarist" or "atariste" at least should have a processor + # > m68000). The system name ranges from "MiNT" over "FreeMiNT" + # to the lowercase version "mint" (or "freemint"). Finally + # the system name "TOS" denotes a system which is actually not + # MiNT. But MiNT is downward compatible to TOS, so this should + # be no problem. 
+ atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) + echo m68k-milan-mint${UNAME_RELEASE} + exit ;; + hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) + echo m68k-hades-mint${UNAME_RELEASE} + exit ;; + *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) + echo m68k-unknown-mint${UNAME_RELEASE} + exit ;; + m68k:machten:*:*) + echo m68k-apple-machten${UNAME_RELEASE} + exit ;; + powerpc:machten:*:*) + echo powerpc-apple-machten${UNAME_RELEASE} + exit ;; + RISC*:Mach:*:*) + echo mips-dec-mach_bsd4.3 + exit ;; + RISC*:ULTRIX:*:*) + echo mips-dec-ultrix${UNAME_RELEASE} + exit ;; + VAX*:ULTRIX*:*:*) + echo vax-dec-ultrix${UNAME_RELEASE} + exit ;; + 2020:CLIX:*:* | 2430:CLIX:*:*) + echo clipper-intergraph-clix${UNAME_RELEASE} + exit ;; + mips:*:*:UMIPS | mips:*:*:RISCos) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c +#ifdef __cplusplus +#include /* for printf() prototype */ + int main (int argc, char *argv[]) { +#else + int main (argc, argv) int argc; char *argv[]; { +#endif + #if defined (host_mips) && defined (MIPSEB) + #if defined (SYSTYPE_SYSV) + printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_SVR4) + printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) + printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); + #endif + #endif + exit (-1); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && + dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && + SYSTEM_NAME=`$dummy $dummyarg` && + { echo "$SYSTEM_NAME"; exit; } + echo mips-mips-riscos${UNAME_RELEASE} + exit ;; + Motorola:PowerMAX_OS:*:*) + echo powerpc-motorola-powermax + exit ;; + Motorola:*:4.3:PL8-*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:Power_UNIX:*:*) + echo powerpc-harris-powerunix + exit ;; + m88k:CX/UX:7*:*) + echo m88k-harris-cxux7 + exit ;; + m88k:*:4*:R4*) + echo m88k-motorola-sysv4 + exit ;; + m88k:*:3*:R3*) + echo m88k-motorola-sysv3 + exit ;; + AViiON:dgux:*:*) + # DG/UX returns AViiON for all architectures + UNAME_PROCESSOR=`/usr/bin/uname -p` + if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] + then + if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ + [ ${TARGET_BINARY_INTERFACE}x = x ] + then + echo m88k-dg-dgux${UNAME_RELEASE} + else + echo m88k-dg-dguxbcs${UNAME_RELEASE} + fi + else + echo i586-dg-dgux${UNAME_RELEASE} + fi + exit ;; + M88*:DolphinOS:*:*) # DolphinOS (SVR3) + echo m88k-dolphin-sysv3 + exit ;; + M88*:*:R3*:*) + # Delta 88k system running SVR3 + echo m88k-motorola-sysv3 + exit ;; + XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) + echo m88k-tektronix-sysv3 + exit ;; + Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) + echo m68k-tektronix-bsd + exit ;; + *:IRIX*:*:*) + echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` + exit ;; + ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. 
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id + exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' + i*86:AIX:*:*) + echo i386-ibm-aix + exit ;; + ia64:AIX:*:*) + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} + exit ;; + *:AIX:2:3) + if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + + main() + { + if (!__power_pc()) + exit(1); + puts("powerpc-ibm-aix3.2.5"); + exit(0); + } +EOF + if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` + then + echo "$SYSTEM_NAME" + else + echo rs6000-ibm-aix3.2.5 + fi + elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then + echo rs6000-ibm-aix3.2.4 + else + echo rs6000-ibm-aix3.2 + fi + exit ;; + *:AIX:*:[45]) + IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` + if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then + IBM_ARCH=rs6000 + else + IBM_ARCH=powerpc + fi + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${IBM_ARCH}-ibm-aix${IBM_REV} + exit ;; + *:AIX:*:*) + echo rs6000-ibm-aix + exit ;; + ibmrt:4.4BSD:*|romp-ibm:BSD:*) + echo romp-ibm-bsd4.4 + exit ;; + ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and + echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to + exit ;; # report: romp-ibm BSD 4.3 + *:BOSX:*:*) + echo rs6000-bull-bosx + exit ;; + DPX/2?00:B.O.S.:*:*) + echo m68k-bull-sysv3 + exit ;; + 9000/[34]??:4.3bsd:1.*:*) + echo m68k-hp-bsd + exit ;; + hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) + echo m68k-hp-bsd4.4 + exit ;; + 9000/[34678]??:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + case "${UNAME_MACHINE}" in + 9000/31? ) HP_ARCH=m68000 ;; + 9000/[34]?? ) HP_ARCH=m68k ;; + 9000/[678][0-9][0-9]) + if [ -x /usr/bin/getconf ]; then + sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` + sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` + case "${sc_cpu_version}" in + 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 + 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 + 532) # CPU_PA_RISC2_0 + case "${sc_kernel_bits}" in + 32) HP_ARCH="hppa2.0n" ;; + 64) HP_ARCH="hppa2.0w" ;; + '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 + esac ;; + esac + fi + if [ "${HP_ARCH}" = "" ]; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + + #define _HPUX_SOURCE + #include + #include + + int main () + { + #if defined(_SC_KERNEL_BITS) + long bits = sysconf(_SC_KERNEL_BITS); + #endif + long cpu = sysconf (_SC_CPU_VERSION); + + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1"); break; + case CPU_PA_RISC2_0: + #if defined(_SC_KERNEL_BITS) + switch (bits) + { + case 64: puts ("hppa2.0w"); break; + case 32: puts ("hppa2.0n"); break; + default: puts ("hppa2.0"); break; + } break; + #else /* !defined(_SC_KERNEL_BITS) */ + puts ("hppa2.0"); break; + #endif + default: puts ("hppa1.0"); break; + } + exit (0); + } +EOF + (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` + test -z "$HP_ARCH" && HP_ARCH=hppa + fi ;; + esac + if [ ${HP_ARCH} = "hppa2.0w" ] + then + eval $set_cc_for_build + + # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating + # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler + # generating 64-bit code. 
GNU and HP use different nomenclature: + # + # $ CC_FOR_BUILD=cc ./config.guess + # => hppa2.0w-hp-hpux11.23 + # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess + # => hppa64-hp-hpux11.23 + + if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | + grep __LP64__ >/dev/null + then + HP_ARCH="hppa2.0w" + else + HP_ARCH="hppa64" + fi + fi + echo ${HP_ARCH}-hp-hpux${HPUX_REV} + exit ;; + ia64:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + echo ia64-hp-hpux${HPUX_REV} + exit ;; + 3050*:HI-UX:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + int + main () + { + long cpu = sysconf (_SC_CPU_VERSION); + /* The order matters, because CPU_IS_HP_MC68K erroneously returns + true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct + results, however. */ + if (CPU_IS_PA_RISC (cpu)) + { + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; + case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; + default: puts ("hppa-hitachi-hiuxwe2"); break; + } + } + else if (CPU_IS_HP_MC68K (cpu)) + puts ("m68k-hitachi-hiuxwe2"); + else puts ("unknown-hitachi-hiuxwe2"); + exit (0); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && + { echo "$SYSTEM_NAME"; exit; } + echo unknown-hitachi-hiuxwe2 + exit ;; + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) + echo hppa1.1-hp-bsd + exit ;; + 9000/8??:4.3bsd:*:*) + echo hppa1.0-hp-bsd + exit ;; + *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) + echo hppa1.0-hp-mpeix + exit ;; + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) + echo hppa1.1-hp-osf + exit ;; + hp8??:OSF1:*:*) + echo hppa1.0-hp-osf + exit ;; + i*86:OSF1:*:*) + if [ -x /usr/sbin/sysversion ] ; then + echo ${UNAME_MACHINE}-unknown-osf1mk + else + echo ${UNAME_MACHINE}-unknown-osf1 + fi + exit ;; + parisc*:Lites*:*:*) + echo hppa1.1-hp-lites + exit ;; + C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) + echo c1-convex-bsd + exit ;; + C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) + echo c34-convex-bsd + exit ;; + C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) + echo c38-convex-bsd + exit ;; + C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) + echo c4-convex-bsd + exit ;; + CRAY*Y-MP:*:*:*) + echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*[A-Z]90:*:*:*) + echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ + | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ + -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ + -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*TS:*:*:*) + echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*T3E:*:*:*) + echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*SV1:*:*:*) + echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + *:UNICOS/mp:*:*) + echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) + FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` + FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` + echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + 5000:UNIX_System_V:4.*:*) + FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 
's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` + echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) + echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} + exit ;; + sparc*:BSD/OS:*:*) + echo sparc-unknown-bsdi${UNAME_RELEASE} + exit ;; + *:BSD/OS:*:*) + echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} + exit ;; + *:FreeBSD:*:*) + case ${UNAME_MACHINE} in + pc98) + echo i386-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + amd64) + echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + *) + echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + esac + exit ;; + i*:CYGWIN*:*) + echo ${UNAME_MACHINE}-pc-cygwin + exit ;; + *:MINGW*:*) + echo ${UNAME_MACHINE}-pc-mingw32 + exit ;; + i*:windows32*:*) + # uname -m includes "-pc" on this system. + echo ${UNAME_MACHINE}-mingw32 + exit ;; + i*:PW*:*) + echo ${UNAME_MACHINE}-pc-pw32 + exit ;; + x86:Interix*:[3456]*) + echo i586-pc-interix${UNAME_RELEASE} + exit ;; + EM64T:Interix*:[3456]* | authenticamd:Interix*:[3456]*) + echo x86_64-unknown-interix${UNAME_RELEASE} + exit ;; + [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) + echo i${UNAME_MACHINE}-pc-mks + exit ;; + i*:Windows_NT*:* | Pentium*:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we + # UNAME_MACHINE based on the output of uname instead of i386? + echo i586-pc-interix + exit ;; + i*:UWIN*:*) + echo ${UNAME_MACHINE}-pc-uwin + exit ;; + amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) + echo x86_64-unknown-cygwin + exit ;; + p*:CYGWIN*:*) + echo powerpcle-unknown-cygwin + exit ;; + prep*:SunOS:5.*:*) + echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + *:GNU:*:*) + # the GNU system + echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` + exit ;; + *:GNU/*:*:*) + # other systems with GNU libc and userland + echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu + exit ;; + i*86:Minix:*:*) + echo ${UNAME_MACHINE}-pc-minix + exit ;; + arm*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + avr32*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + cris:Linux:*:*) + echo cris-axis-linux-${LIBC} + exit ;; + crisv32:Linux:*:*) + echo crisv32-axis-linux-${LIBC} + exit ;; + frv:Linux:*:*) + echo frv-unknown-linux-${LIBC} + exit ;; + ia64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + m32r*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + m68*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + mips:Linux:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #undef CPU + #undef mips + #undef mipsel + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + CPU=mipsel + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + CPU=mips + #else + CPU= + #endif + #endif +EOF + eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n ' + /^CPU/{ + s: ::g + p + }'`" + test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } + ;; + mips64:Linux:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #undef CPU + #undef 
mips64 + #undef mips64el + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + CPU=mips64el + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + CPU=mips64 + #else + CPU= + #endif + #endif +EOF + eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n ' + /^CPU/{ + s: ::g + p + }'`" + test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } + ;; + or32:Linux:*:*) + echo or32-unknown-linux-${LIBC} + exit ;; + ppc:Linux:*:*) + echo powerpc-unknown-linux-${LIBC} + exit ;; + ppc64:Linux:*:*) + echo powerpc64-unknown-linux-${LIBC} + exit ;; + alpha:Linux:*:*) + case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in + EV5) UNAME_MACHINE=alphaev5 ;; + EV56) UNAME_MACHINE=alphaev56 ;; + PCA56) UNAME_MACHINE=alphapca56 ;; + PCA57) UNAME_MACHINE=alphapca56 ;; + EV6) UNAME_MACHINE=alphaev6 ;; + EV67) UNAME_MACHINE=alphaev67 ;; + EV68*) UNAME_MACHINE=alphaev68 ;; + esac + objdump --private-headers /bin/sh | grep ld.so.1 >/dev/null + if test "$?" = 0 ; then LIBC="gnulibc1" ; fi + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + parisc:Linux:*:* | hppa:Linux:*:*) + # Look for CPU level + case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in + PA7*) echo hppa1.1-unknown-linux-${LIBC} ;; + PA8*) echo hppa2.0-unknown-linux-${LIBC} ;; + *) echo hppa-unknown-linux-${LIBC} ;; + esac + exit ;; + parisc64:Linux:*:* | hppa64:Linux:*:*) + echo hppa64-unknown-linux-${LIBC} + exit ;; + s390:Linux:*:* | s390x:Linux:*:*) + echo ${UNAME_MACHINE}-ibm-linux + exit ;; + sh64*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + sh*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + sparc:Linux:*:* | sparc64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + vax:Linux:*:*) + echo ${UNAME_MACHINE}-dec-linux-${LIBC} + exit ;; + x86_64:Linux:*:*) + echo x86_64-unknown-linux-${LIBC} + exit ;; + xtensa:Linux:*:*) + echo xtensa-unknown-linux-${LIBC} + exit ;; + i*86:Linux:*:*) + # The BFD linker knows what the default object file format is, so + # first see if it will tell us. cd to the root directory to prevent + # problems with other programs or directories called `ld' in the path. + # Set LC_ALL=C to ensure ld outputs messages in English. + ld_supported_targets=`cd /; LC_ALL=C ld --help 2>&1 \ + | sed -ne '/supported targets:/!d + s/[ ][ ]*/ /g + s/.*supported targets: *// + s/ .*// + p'` + case "$ld_supported_targets" in + elf32-i386) + TENTATIVE="${UNAME_MACHINE}-pc-linux-${LIBC}" + ;; + a.out-i386-linux) + echo "${UNAME_MACHINE}-pc-linux-${LIBC}aout" + exit ;; + coff-i386) + echo "${UNAME_MACHINE}-pc-linux-${LIBC}coff" + exit ;; + "") + # Either a pre-BFD a.out linker (linux-gnuoldld) or + # one that does not give us useful --help. 
+ echo "${UNAME_MACHINE}-pc-linux-${LIBC}oldld" + exit ;; + esac + # This should get integrated into the C code below, but now we hack + if [ "$LIBC" != "gnu" ] ; then echo "$TENTATIVE" && exit 0 ; fi + # Determine whether the default compiler is a.out or elf + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + #ifdef __ELF__ + # ifdef __GLIBC__ + # if __GLIBC__ >= 2 + LIBC=gnu + # else + LIBC=gnulibc1 + # endif + # else + LIBC=gnulibc1 + # endif + #else + #if defined(__INTEL_COMPILER) || defined(__PGI) || defined(__SUNPRO_C) || defined(__SUNPRO_CC) + LIBC=gnu + #else + LIBC=gnuaout + #endif + #endif + #ifdef __dietlibc__ + LIBC=dietlibc + #endif +EOF + eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n ' + /^LIBC/{ + s: ::g + p + }'`" + test x"${LIBC}" != x && { + echo "${UNAME_MACHINE}-pc-linux-${LIBC}" + exit + } + test x"${TENTATIVE}" != x && { echo "${TENTATIVE}"; exit; } + ;; + i*86:DYNIX/ptx:4*:*) + # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. + # earlier versions are messed up and put the nodename in both + # sysname and nodename. + echo i386-sequent-sysv4 + exit ;; + i*86:UNIX_SV:4.2MP:2.*) + # Unixware is an offshoot of SVR4, but it has its own version + # number series starting with 2... + # I am not positive that other SVR4 systems won't match this, + # I just have to hope. -- rms. + # Use sysv4.2uw... so that sysv4* matches it. + echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} + exit ;; + i*86:OS/2:*:*) + # If we were able to find `uname', then EMX Unix compatibility + # is probably installed. + echo ${UNAME_MACHINE}-pc-os2-emx + exit ;; + i*86:XTS-300:*:STOP) + echo ${UNAME_MACHINE}-unknown-stop + exit ;; + i*86:atheos:*:*) + echo ${UNAME_MACHINE}-unknown-atheos + exit ;; + i*86:syllable:*:*) + echo ${UNAME_MACHINE}-pc-syllable + exit ;; + i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.0*:*) + echo i386-unknown-lynxos${UNAME_RELEASE} + exit ;; + i*86:*DOS:*:*) + echo ${UNAME_MACHINE}-pc-msdosdjgpp + exit ;; + i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) + UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` + if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then + echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} + else + echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} + fi + exit ;; + i*86:*:5:[678]*) + # UnixWare 7.x, OpenUNIX and OpenServer 6. + case `/bin/uname -X | grep "^Machine"` in + *486*) UNAME_MACHINE=i486 ;; + *Pentium) UNAME_MACHINE=i586 ;; + *Pent*|*Celeron) UNAME_MACHINE=i686 ;; + esac + echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} + exit ;; + i*86:*:3.2:*) + if test -f /usr/options/cb.name; then + UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then + UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` + (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 + (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ + && UNAME_MACHINE=i586 + (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ + && UNAME_MACHINE=i686 + (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ + && UNAME_MACHINE=i686 + echo ${UNAME_MACHINE}-pc-sco$UNAME_REL + else + echo ${UNAME_MACHINE}-pc-sysv32 + fi + exit ;; + pc:*:*:*) + # Left here for compatibility: + # uname -m prints for DJGPP always 'pc', but it prints nothing about + # the processor, so we play safe by assuming i386. 
+ echo i386-pc-msdosdjgpp + exit ;; + Intel:Mach:3*:*) + echo i386-pc-mach3 + exit ;; + paragon:*:*:*) + echo i860-intel-osf1 + exit ;; + i860:*:4.*:*) # i860-SVR4 + if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then + echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 + else # Add other i860-SVR4 vendors below as they are discovered. + echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 + fi + exit ;; + mini*:CTIX:SYS*5:*) + # "miniframe" + echo m68010-convergent-sysv + exit ;; + mc68k:UNIX:SYSTEM5:3.51m) + echo m68k-convergent-sysv + exit ;; + M680?0:D-NIX:5.3:*) + echo m68k-diab-dnix + exit ;; + M68*:*:R3V[5678]*:*) + test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; + 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) + OS_REL='' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4; exit; } ;; + m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) + echo m68k-unknown-lynxos${UNAME_RELEASE} + exit ;; + mc68030:UNIX_System_V:4.*:*) + echo m68k-atari-sysv4 + exit ;; + TSUNAMI:LynxOS:2.*:*) + echo sparc-unknown-lynxos${UNAME_RELEASE} + exit ;; + rs6000:LynxOS:2.*:*) + echo rs6000-unknown-lynxos${UNAME_RELEASE} + exit ;; + PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.0*:*) + echo powerpc-unknown-lynxos${UNAME_RELEASE} + exit ;; + SM[BE]S:UNIX_SV:*:*) + echo mips-dde-sysv${UNAME_RELEASE} + exit ;; + RM*:ReliantUNIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + RM*:SINIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + *:SINIX-*:*:*) + if uname -p 2>/dev/null >/dev/null ; then + UNAME_MACHINE=`(uname -p) 2>/dev/null` + echo ${UNAME_MACHINE}-sni-sysv4 + else + echo ns32k-sni-sysv + fi + exit ;; + PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + # says + echo i586-unisys-sysv4 + exit ;; + *:UNIX_System_V:4*:FTX*) + # From Gerald Hewes . + # How about differentiating between stratus architectures? -djm + echo hppa1.1-stratus-sysv4 + exit ;; + *:*:*:FTX*) + # From seanf@swdc.stratus.com. + echo i860-stratus-sysv4 + exit ;; + i*86:VOS:*:*) + # From Paul.Green@stratus.com. + echo ${UNAME_MACHINE}-stratus-vos + exit ;; + *:VOS:*:*) + # From Paul.Green@stratus.com. + echo hppa1.1-stratus-vos + exit ;; + mc68*:A/UX:*:*) + echo m68k-apple-aux${UNAME_RELEASE} + exit ;; + news*:NEWS-OS:6*:*) + echo mips-sony-newsos6 + exit ;; + R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) + if [ -d /usr/nec ]; then + echo mips-nec-sysv${UNAME_RELEASE} + else + echo mips-unknown-sysv${UNAME_RELEASE} + fi + exit ;; + BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. + echo powerpc-be-beos + exit ;; + BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. + echo powerpc-apple-beos + exit ;; + BePC:BeOS:*:*) # BeOS running on Intel PC compatible. 
+ echo i586-pc-beos + exit ;; + SX-4:SUPER-UX:*:*) + echo sx4-nec-superux${UNAME_RELEASE} + exit ;; + SX-5:SUPER-UX:*:*) + echo sx5-nec-superux${UNAME_RELEASE} + exit ;; + SX-6:SUPER-UX:*:*) + echo sx6-nec-superux${UNAME_RELEASE} + exit ;; + SX-7:SUPER-UX:*:*) + echo sx7-nec-superux${UNAME_RELEASE} + exit ;; + SX-8:SUPER-UX:*:*) + echo sx8-nec-superux${UNAME_RELEASE} + exit ;; + SX-8R:SUPER-UX:*:*) + echo sx8r-nec-superux${UNAME_RELEASE} + exit ;; + Power*:Rhapsody:*:*) + echo powerpc-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Rhapsody:*:*) + echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Darwin:*:*) + UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown + case $UNAME_PROCESSOR in + unknown) UNAME_PROCESSOR=powerpc ;; + esac + echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} + exit ;; + *:procnto*:*:* | *:QNX:[0123456789]*:*) + UNAME_PROCESSOR=`uname -p` + if test "$UNAME_PROCESSOR" = "x86"; then + UNAME_PROCESSOR=i386 + UNAME_MACHINE=pc + fi + echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} + exit ;; + *:QNX:*:4*) + echo i386-pc-qnx + exit ;; + NSE-?:NONSTOP_KERNEL:*:*) + echo nse-tandem-nsk${UNAME_RELEASE} + exit ;; + NSR-?:NONSTOP_KERNEL:*:*) + echo nsr-tandem-nsk${UNAME_RELEASE} + exit ;; + *:NonStop-UX:*:*) + echo mips-compaq-nonstopux + exit ;; + BS2000:POSIX*:*:*) + echo bs2000-siemens-sysv + exit ;; + DS/*:UNIX_System_V:*:*) + echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} + exit ;; + *:Plan9:*:*) + # "uname -m" is not consistent, so use $cputype instead. 386 + # is converted to i386 for consistency with other x86 + # operating systems. + if test "$cputype" = "386"; then + UNAME_MACHINE=i386 + else + UNAME_MACHINE="$cputype" + fi + echo ${UNAME_MACHINE}-unknown-plan9 + exit ;; + *:TOPS-10:*:*) + echo pdp10-unknown-tops10 + exit ;; + *:TENEX:*:*) + echo pdp10-unknown-tenex + exit ;; + KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) + echo pdp10-dec-tops20 + exit ;; + XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) + echo pdp10-xkl-tops20 + exit ;; + *:TOPS-20:*:*) + echo pdp10-unknown-tops20 + exit ;; + *:ITS:*:*) + echo pdp10-unknown-its + exit ;; + SEI:*:*:SEIUX) + echo mips-sei-seiux${UNAME_RELEASE} + exit ;; + *:DragonFly:*:*) + echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + exit ;; + *:*VMS:*:*) + UNAME_MACHINE=`(uname -p) 2>/dev/null` + case "${UNAME_MACHINE}" in + A*) echo alpha-dec-vms ; exit ;; + I*) echo ia64-dec-vms ; exit ;; + V*) echo vax-dec-vms ; exit ;; + esac ;; + *:XENIX:*:SysV) + echo i386-pc-xenix + exit ;; + i*86:skyos:*:*) + echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//' + exit ;; + i*86:rdos:*:*) + echo ${UNAME_MACHINE}-pc-rdos + exit ;; +esac + +#echo '(No uname command or uname output not recognized.)' 1>&2 +#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2 + +eval $set_cc_for_build +cat >$dummy.c < +# include +#endif +main () +{ +#if defined (sony) +#if defined (MIPSEB) + /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed, + I don't know.... 
*/ + printf ("mips-sony-bsd\n"); exit (0); +#else +#include + printf ("m68k-sony-newsos%s\n", +#ifdef NEWSOS4 + "4" +#else + "" +#endif + ); exit (0); +#endif +#endif + +#if defined (__arm) && defined (__acorn) && defined (__unix) + printf ("arm-acorn-riscix\n"); exit (0); +#endif + +#if defined (hp300) && !defined (hpux) + printf ("m68k-hp-bsd\n"); exit (0); +#endif + +#if defined (NeXT) +#if !defined (__ARCHITECTURE__) +#define __ARCHITECTURE__ "m68k" +#endif + int version; + version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`; + if (version < 4) + printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); + else + printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); + exit (0); +#endif + +#if defined (MULTIMAX) || defined (n16) +#if defined (UMAXV) + printf ("ns32k-encore-sysv\n"); exit (0); +#else +#if defined (CMU) + printf ("ns32k-encore-mach\n"); exit (0); +#else + printf ("ns32k-encore-bsd\n"); exit (0); +#endif +#endif +#endif + +#if defined (__386BSD__) + printf ("i386-pc-bsd\n"); exit (0); +#endif + +#if defined (sequent) +#if defined (i386) + printf ("i386-sequent-dynix\n"); exit (0); +#endif +#if defined (ns32000) + printf ("ns32k-sequent-dynix\n"); exit (0); +#endif +#endif + +#if defined (_SEQUENT_) + struct utsname un; + + uname(&un); + + if (strncmp(un.version, "V2", 2) == 0) { + printf ("i386-sequent-ptx2\n"); exit (0); + } + if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */ + printf ("i386-sequent-ptx1\n"); exit (0); + } + printf ("i386-sequent-ptx\n"); exit (0); + +#endif + +#if defined (vax) +# if !defined (ultrix) +# include +# if defined (BSD) +# if BSD == 43 + printf ("vax-dec-bsd4.3\n"); exit (0); +# else +# if BSD == 199006 + printf ("vax-dec-bsd4.3reno\n"); exit (0); +# else + printf ("vax-dec-bsd\n"); exit (0); +# endif +# endif +# else + printf ("vax-dec-bsd\n"); exit (0); +# endif +# else + printf ("vax-dec-ultrix\n"); exit (0); +# endif +#endif + +#if defined (alliant) && defined (i860) + printf ("i860-alliant-bsd\n"); exit (0); +#endif + + exit (1); +} +EOF + +$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` && + { echo "$SYSTEM_NAME"; exit; } + +# Apollos put the system type in the environment. + +test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; } + +# Convex versions that predate uname can use getsysinfo(1) + +if [ -x /usr/convex/getsysinfo ] +then + case `getsysinfo -f cpu_type` in + c1*) + echo c1-convex-bsd + exit ;; + c2*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + c34*) + echo c34-convex-bsd + exit ;; + c38*) + echo c38-convex-bsd + exit ;; + c4*) + echo c4-convex-bsd + exit ;; + esac +fi + +cat >&2 < in order to provide the needed +information to handle your system. 
+ +config.guess timestamp = $timestamp + +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null` + +hostinfo = `(hostinfo) 2>/dev/null` +/bin/universe = `(/bin/universe) 2>/dev/null` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` +/bin/arch = `(/bin/arch) 2>/dev/null` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` + +UNAME_MACHINE = ${UNAME_MACHINE} +UNAME_RELEASE = ${UNAME_RELEASE} +UNAME_SYSTEM = ${UNAME_SYSTEM} +UNAME_VERSION = ${UNAME_VERSION} +EOF + +exit 1 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/libraries/sqlite/unix/sqlite-3.5.1/config.sub b/libraries/sqlite/unix/sqlite-3.5.1/config.sub new file mode 100644 index 0000000000..2a4932e7f5 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/config.sub @@ -0,0 +1,1640 @@ +#! /bin/sh +# Configuration validation subroutine script. +# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, +# 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, +# Inc. + +timestamp='2007-01-18' + +# This file is (in principle) common to ALL GNU software. +# The presence of a machine in this file suggests that SOME GNU software +# can handle that machine. It does not imply ALL GNU software can. +# +# This file is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA +# 02110-1301, USA. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + + +# Please send patches to . Submit a context +# diff and a properly formatted ChangeLog entry. +# +# Configuration subroutine to validate and canonicalize a configuration type. +# Supply the specified configuration type as an argument. +# If it is invalid, we print an error message on stderr and exit with code 1. +# Otherwise, we print the canonical config type on stdout and succeed. + +# This file is supposed to be the same for all GNU packages +# and recognize all the CPU types, system types and aliases +# that are meaningful with *any* GNU software. +# Each package is responsible for reporting which valid configurations +# it does not support. The user should be able to distinguish +# a failure to support a valid configuration from a meaningless +# configuration. 
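(Illustrative sketch, not part of the patch.) The comments above describe what config.sub is for: given a full CPU-COMPANY-SYSTEM name or a short alias, it prints the canonical form on stdout, and for input it cannot recognize it prints an error on stderr and exits with status 1; the exact target form is spelled out in the comments that continue below. A minimal usage sketch follows. The example aliases and the expected results are read off the alias and default-OS tables later in this same file, not taken from an actual run.

# Usage sketch for the config.sub script added in this patch (illustrative only).
# Expected results follow from the case tables further down in this file.
sub=./config.sub

$sub i686-linux   # i*86 -> i686-pc, -linux -> -linux-gnu    => i686-pc-linux-gnu
$sub amd64        # alias -> x86_64-pc, no OS given -> -none => x86_64-pc-none
$sub sun4         # alias -> sparc-sun, default -sunos4.1.1  => sparc-sun-sunos4.1.1

# Typical guard in a calling build script: stop early on unrecognized names.
host=`$sub "$1"` || { echo "unrecognized configuration: $1" >&2; exit 1; }
echo "canonical host is $host"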
+ +# The goal of this file is to map all the various variations of a given +# machine specification into a single specification in the form: +# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM +# or in some cases, the newer four-part form: +# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM +# It is wrong to echo any other type of specification. + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] CPU-MFR-OPSYS + $0 [OPTION] ALIAS + +Canonicalize a configuration name. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.sub ($timestamp) + +Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 +Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" + exit 1 ;; + + *local*) + # First pass through any local machine types. + echo $1 + exit ;; + + * ) + break ;; + esac +done + +case $# in + 0) echo "$me: missing argument$help" >&2 + exit 1;; + 1) ;; + *) echo "$me: too many arguments$help" >&2 + exit 1;; +esac + +# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). +# Here we must recognize all the valid KERNEL-OS combinations. +maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` +case $maybe_os in + nto-qnx* | linux-gnu* | linux-dietlibc | linux-newlib* | linux-uclibc* | \ + uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* | \ + storm-chaos* | os2-emx* | rtmk-nova*) + os=-$maybe_os + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` + ;; + *) + basic_machine=`echo $1 | sed 's/-[^-]*$//'` + if [ $basic_machine != $1 ] + then os=`echo $1 | sed 's/.*-/-/'` + else os=; fi + ;; +esac + +### Let's recognize common machines as not being operating systems so +### that things like config.sub decstation-3100 work. We also +### recognize some manufacturers as not being operating systems, so we +### can provide default operating systems below. +case $os in + -sun*os*) + # Prevent following clause from handling this invalid input. 
+ ;; + -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ + -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ + -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ + -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ + -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ + -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ + -apple | -axis | -knuth | -cray) + os= + basic_machine=$1 + ;; + -sim | -cisco | -oki | -wec | -winbond) + os= + basic_machine=$1 + ;; + -scout) + ;; + -wrs) + os=-vxworks + basic_machine=$1 + ;; + -chorusos*) + os=-chorusos + basic_machine=$1 + ;; + -chorusrdb) + os=-chorusrdb + basic_machine=$1 + ;; + -hiux*) + os=-hiuxwe2 + ;; + -sco6) + os=-sco5v6 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5) + os=-sco3.2v5 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco4) + os=-sco3.2v4 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2.[4-9]*) + os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2v[4-9]*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5v6*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco*) + os=-sco3.2v2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -udk*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -isc) + os=-isc2.2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -clix*) + basic_machine=clipper-intergraph + ;; + -isc*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -lynx*) + os=-lynxos + ;; + -ptx*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` + ;; + -windowsnt*) + os=`echo $os | sed -e 's/windowsnt/winnt/'` + ;; + -psos*) + os=-psos + ;; + -mint | -mint[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; +esac + +# Decode aliases for certain CPU-COMPANY combinations. +case $basic_machine in + # Recognize the basic CPU types without company name. + # Some are omitted here because they have special meanings below. 
+ 1750a | 580 \ + | a29k \ + | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ + | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ + | am33_2.0 \ + | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \ + | bfin \ + | c4x | clipper \ + | d10v | d30v | dlx | dsp16xx | dvp \ + | fido | fr30 | frv \ + | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ + | i370 | i860 | i960 | ia64 \ + | ip2k | iq2000 \ + | m32c | m32r | m32rle | m68000 | m68k | m88k \ + | maxq | mb | microblaze | mcore | mep \ + | mips | mipsbe | mipseb | mipsel | mipsle \ + | mips16 \ + | mips64 | mips64el \ + | mips64vr | mips64vrel \ + | mips64orion | mips64orionel \ + | mips64vr4100 | mips64vr4100el \ + | mips64vr4300 | mips64vr4300el \ + | mips64vr5000 | mips64vr5000el \ + | mips64vr5900 | mips64vr5900el \ + | mipsisa32 | mipsisa32el \ + | mipsisa32r2 | mipsisa32r2el \ + | mipsisa64 | mipsisa64el \ + | mipsisa64r2 | mipsisa64r2el \ + | mipsisa64sb1 | mipsisa64sb1el \ + | mipsisa64sr71k | mipsisa64sr71kel \ + | mipstx39 | mipstx39el \ + | mn10200 | mn10300 \ + | mt \ + | msp430 \ + | nios | nios2 \ + | ns16k | ns32k \ + | or32 \ + | pdp10 | pdp11 | pj | pjl \ + | powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \ + | pyramid \ + | score \ + | sh | sh[1234] | sh[24]a | sh[24]a*eb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ + | sh64 | sh64le \ + | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ + | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ + | spu | strongarm \ + | tahoe | thumb | tic4x | tic80 | tron \ + | v850 | v850e \ + | we32k \ + | x86 | xc16x | xscale | xscalee[bl] | xstormy16 | xtensa \ + | z8k) + basic_machine=$basic_machine-unknown + ;; + m6811 | m68hc11 | m6812 | m68hc12) + # Motorola 68HC11/12. + basic_machine=$basic_machine-unknown + os=-none + ;; + m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) + ;; + ms1) + basic_machine=mt-unknown + ;; + + # We use `pc' rather than `unknown' + # because (1) that's what they normally are, and + # (2) the word "unknown" tends to confuse beginning users. + i*86 | x86_64) + basic_machine=$basic_machine-pc + ;; + # Object if more than one company name word. + *-*-*) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; + # Recognize the basic CPU types with company name. 
+ 580-* \ + | a29k-* \ + | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ + | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ + | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \ + | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ + | avr-* | avr32-* \ + | bfin-* | bs2000-* \ + | c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \ + | clipper-* | craynv-* | cydra-* \ + | d10v-* | d30v-* | dlx-* \ + | elxsi-* \ + | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ + | h8300-* | h8500-* \ + | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ + | i*86-* | i860-* | i960-* | ia64-* \ + | ip2k-* | iq2000-* \ + | m32c-* | m32r-* | m32rle-* \ + | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ + | m88110-* | m88k-* | maxq-* | mcore-* \ + | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ + | mips16-* \ + | mips64-* | mips64el-* \ + | mips64vr-* | mips64vrel-* \ + | mips64orion-* | mips64orionel-* \ + | mips64vr4100-* | mips64vr4100el-* \ + | mips64vr4300-* | mips64vr4300el-* \ + | mips64vr5000-* | mips64vr5000el-* \ + | mips64vr5900-* | mips64vr5900el-* \ + | mipsisa32-* | mipsisa32el-* \ + | mipsisa32r2-* | mipsisa32r2el-* \ + | mipsisa64-* | mipsisa64el-* \ + | mipsisa64r2-* | mipsisa64r2el-* \ + | mipsisa64sb1-* | mipsisa64sb1el-* \ + | mipsisa64sr71k-* | mipsisa64sr71kel-* \ + | mipstx39-* | mipstx39el-* \ + | mmix-* \ + | mt-* \ + | msp430-* \ + | nios-* | nios2-* \ + | none-* | np1-* | ns16k-* | ns32k-* \ + | orion-* \ + | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ + | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \ + | pyramid-* \ + | romp-* | rs6000-* \ + | sh-* | sh[1234]-* | sh[24]a-* | sh[24]a*eb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ + | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ + | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ + | sparclite-* \ + | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | strongarm-* | sv1-* | sx?-* \ + | tahoe-* | thumb-* \ + | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ + | tron-* \ + | v850-* | v850e-* | vax-* \ + | we32k-* \ + | x86-* | x86_64-* | xc16x-* | xps100-* | xscale-* | xscalee[bl]-* \ + | xstormy16-* | xtensa-* \ + | ymp-* \ + | z8k-*) + ;; + # Recognize the various machine names and aliases which stand + # for a CPU type and a company and sometimes even an OS. 
+ 386bsd) + basic_machine=i386-unknown + os=-bsd + ;; + 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) + basic_machine=m68000-att + ;; + 3b*) + basic_machine=we32k-att + ;; + a29khif) + basic_machine=a29k-amd + os=-udi + ;; + abacus) + basic_machine=abacus-unknown + ;; + adobe68k) + basic_machine=m68010-adobe + os=-scout + ;; + alliant | fx80) + basic_machine=fx80-alliant + ;; + altos | altos3068) + basic_machine=m68k-altos + ;; + am29k) + basic_machine=a29k-none + os=-bsd + ;; + amd64) + basic_machine=x86_64-pc + ;; + amd64-*) + basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + amdahl) + basic_machine=580-amdahl + os=-sysv + ;; + amiga | amiga-*) + basic_machine=m68k-unknown + ;; + amigaos | amigados) + basic_machine=m68k-unknown + os=-amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + os=-sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + os=-sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + os=-bsd + ;; + aux) + basic_machine=m68k-apple + os=-aux + ;; + balance) + basic_machine=ns32k-sequent + os=-dynix + ;; + c90) + basic_machine=c90-cray + os=-unicos + ;; + convex-c1) + basic_machine=c1-convex + os=-bsd + ;; + convex-c2) + basic_machine=c2-convex + os=-bsd + ;; + convex-c32) + basic_machine=c32-convex + os=-bsd + ;; + convex-c34) + basic_machine=c34-convex + os=-bsd + ;; + convex-c38) + basic_machine=c38-convex + os=-bsd + ;; + cray | j90) + basic_machine=j90-cray + os=-unicos + ;; + craynv) + basic_machine=craynv-cray + os=-unicosmp + ;; + cr16c) + basic_machine=cr16c-unknown + os=-elf + ;; + crds | unos) + basic_machine=m68k-crds + ;; + crisv32 | crisv32-* | etraxfs*) + basic_machine=crisv32-axis + ;; + cris | cris-* | etrax*) + basic_machine=cris-axis + ;; + crx) + basic_machine=crx-unknown + os=-elf + ;; + da30 | da30-*) + basic_machine=m68k-da30 + ;; + decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) + basic_machine=mips-dec + ;; + decsystem10* | dec10*) + basic_machine=pdp10-dec + os=-tops10 + ;; + decsystem20* | dec20*) + basic_machine=pdp10-dec + os=-tops20 + ;; + delta | 3300 | motorola-3300 | motorola-delta \ + | 3300-motorola | delta-motorola) + basic_machine=m68k-motorola + ;; + delta88) + basic_machine=m88k-motorola + os=-sysv3 + ;; + djgpp) + basic_machine=i586-pc + os=-msdosdjgpp + ;; + dpx20 | dpx20-*) + basic_machine=rs6000-bull + os=-bosx + ;; + dpx2* | dpx2*-bull) + basic_machine=m68k-bull + os=-sysv3 + ;; + ebmon29k) + basic_machine=a29k-amd + os=-ebmon + ;; + elxsi) + basic_machine=elxsi-elxsi + os=-bsd + ;; + encore | umax | mmax) + basic_machine=ns32k-encore + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + os=-ose + ;; + fx2800) + basic_machine=i860-alliant + ;; + genix) + basic_machine=ns32k-ns + ;; + gmicro) + basic_machine=tron-gmicro + os=-sysv + ;; + go32) + basic_machine=i386-pc + os=-go32 + ;; + h3050r* | hiux*) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + h8300hms) + basic_machine=h8300-hitachi + os=-hms + ;; + h8300xray) + basic_machine=h8300-hitachi + os=-xray + ;; + h8500hms) + basic_machine=h8500-hitachi + os=-hms + ;; + harris) + basic_machine=m88k-harris + os=-sysv3 + ;; + hp300-*) + basic_machine=m68k-hp + ;; + hp300bsd) + basic_machine=m68k-hp + os=-bsd + ;; + hp300hpux) + basic_machine=m68k-hp + os=-hpux + ;; + hp3k9[0-9][0-9] | hp9[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k2[0-9][0-9] | hp9k31[0-9]) + basic_machine=m68000-hp + ;; + hp9k3[2-9][0-9]) + basic_machine=m68k-hp + ;; + hp9k6[0-9][0-9] | hp6[0-9][0-9]) + 
basic_machine=hppa1.0-hp + ;; + hp9k7[0-79][0-9] | hp7[0-79][0-9]) + basic_machine=hppa1.1-hp + ;; + hp9k78[0-9] | hp78[0-9]) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][13679] | hp8[0-9][13679]) + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][0-9] | hp8[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hppa-next) + os=-nextstep3 + ;; + hppaosf) + basic_machine=hppa1.1-hp + os=-osf + ;; + hppro) + basic_machine=hppa1.1-hp + os=-proelf + ;; + i370-ibm* | ibm*) + basic_machine=i370-ibm + ;; +# I'm not sure what "Sysv32" means. Should this be sysv3.2? + i*86v32) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv32 + ;; + i*86v4*) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv4 + ;; + i*86v) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv + ;; + i*86sol2) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-solaris2 + ;; + i386mach) + basic_machine=i386-mach + os=-mach + ;; + i386-vsta | vsta) + basic_machine=i386-unknown + os=-vsta + ;; + iris | iris4d) + basic_machine=mips-sgi + case $os in + -irix*) + ;; + *) + os=-irix4 + ;; + esac + ;; + isi68 | isi) + basic_machine=m68k-isi + os=-sysv + ;; + m88k-omron*) + basic_machine=m88k-omron + ;; + magnum | m3230) + basic_machine=mips-mips + os=-sysv + ;; + merlin) + basic_machine=ns32k-utek + os=-sysv + ;; + mingw32) + basic_machine=i386-pc + os=-mingw32 + ;; + miniframe) + basic_machine=m68000-convergent + ;; + *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; + mipsEE* | ee | ps2) + basic_machine=mips64r5900el-scei + case $os in + -linux*) + ;; + *) + os=-elf + ;; + esac + ;; + iop) + basic_machine=mipsel-scei + os=-irx + ;; + dvp) + basic_machine=dvp-scei + os=-elf + ;; + mips3*-*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` + ;; + mips3*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown + ;; + monitor) + basic_machine=m68k-rom68k + os=-coff + ;; + morphos) + basic_machine=powerpc-unknown + os=-morphos + ;; + msdos) + basic_machine=i386-pc + os=-msdos + ;; + ms1-*) + basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` + ;; + mvs) + basic_machine=i370-ibm + os=-mvs + ;; + ncr3000) + basic_machine=i486-ncr + os=-sysv4 + ;; + netbsd386) + basic_machine=i386-unknown + os=-netbsd + ;; + netwinder) + basic_machine=armv4l-rebel + os=-linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + os=-newsos + ;; + news1000) + basic_machine=m68030-sony + os=-newsos + ;; + news-3600 | risc-news) + basic_machine=mips-sony + os=-newsos + ;; + necv70) + basic_machine=v70-nec + os=-sysv + ;; + next | m*-next ) + basic_machine=m68k-next + case $os in + -nextstep* ) + ;; + -ns2*) + os=-nextstep2 + ;; + *) + os=-nextstep3 + ;; + esac + ;; + nh3000) + basic_machine=m68k-harris + os=-cxux + ;; + nh[45]000) + basic_machine=m88k-harris + os=-cxux + ;; + nindy960) + basic_machine=i960-intel + os=-nindy + ;; + mon960) + basic_machine=i960-intel + os=-mon960 + ;; + nonstopux) + basic_machine=mips-compaq + os=-nonstopux + ;; + np1) + basic_machine=np1-gould + ;; + nsr-tandem) + basic_machine=nsr-tandem + ;; + op50n-* | op60c-*) + basic_machine=hppa1.1-oki + os=-proelf + ;; + openrisc | openrisc-*) + basic_machine=or32-unknown + ;; + os400) + basic_machine=powerpc-ibm + os=-os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + os=-ose + ;; + 
os68k) + basic_machine=m68k-none + os=-os68k + ;; + pa-hitachi) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + paragon) + basic_machine=i860-intel + os=-osf + ;; + pbd) + basic_machine=sparc-tti + ;; + pbb) + basic_machine=m68k-tti + ;; + pc532 | pc532-*) + basic_machine=ns32k-pc532 + ;; + pc98) + basic_machine=i386-pc + ;; + pc98-*) + basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium | p5 | k5 | k6 | nexgen | viac3) + basic_machine=i586-pc + ;; + pentiumpro | p6 | 6x86 | athlon | athlon_*) + basic_machine=i686-pc + ;; + pentiumii | pentium2 | pentiumiii | pentium3) + basic_machine=i686-pc + ;; + pentium4) + basic_machine=i786-pc + ;; + pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) + basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumpro-* | p6-* | 6x86-* | athlon-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium4-*) + basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pn) + basic_machine=pn-gould + ;; + power) basic_machine=power-ibm + ;; + ppc) basic_machine=powerpc-unknown + ;; + ppc-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppcle | powerpclittle | ppc-le | powerpc-little) + basic_machine=powerpcle-unknown + ;; + ppcle-* | powerpclittle-*) + basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64) basic_machine=powerpc64-unknown + ;; + ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64le | powerpc64little | ppc64-le | powerpc64-little) + basic_machine=powerpc64le-unknown + ;; + ppc64le-* | powerpc64little-*) + basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ps2) + basic_machine=i386-ibm + ;; + pw32) + basic_machine=i586-unknown + os=-pw32 + ;; + rdos) + basic_machine=i386-pc + os=-rdos + ;; + rom68k) + basic_machine=m68k-rom68k + os=-coff + ;; + rm[46]00) + basic_machine=mips-siemens + ;; + rtpc | rtpc-*) + basic_machine=romp-ibm + ;; + s390 | s390-*) + basic_machine=s390-ibm + ;; + s390x | s390x-*) + basic_machine=s390x-ibm + ;; + sa29200) + basic_machine=a29k-amd + os=-udi + ;; + sb1) + basic_machine=mipsisa64sb1-unknown + ;; + sb1el) + basic_machine=mipsisa64sb1el-unknown + ;; + sde) + basic_machine=mipsisa32-sde + os=-elf + ;; + sei) + basic_machine=mips-sei + os=-seiux + ;; + sequent) + basic_machine=i386-sequent + ;; + sh) + basic_machine=sh-hitachi + os=-hms + ;; + sh5el) + basic_machine=sh5le-unknown + ;; + sh64) + basic_machine=sh64-unknown + ;; + sparclite-wrs | simso-wrs) + basic_machine=sparclite-wrs + os=-vxworks + ;; + sps7) + basic_machine=m68k-bull + os=-sysv2 + ;; + spur) + basic_machine=spur-unknown + ;; + st2000) + basic_machine=m68k-tandem + ;; + stratus) + basic_machine=i860-stratus + os=-sysv4 + ;; + sun2) + basic_machine=m68000-sun + ;; + sun2os3) + basic_machine=m68000-sun + os=-sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + os=-sunos4 + ;; + sun3os3) + basic_machine=m68k-sun + os=-sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + os=-sunos4 + ;; + sun4os3) + basic_machine=sparc-sun + os=-sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + os=-sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + os=-solaris2 + ;; + sun3 | sun3-*) + basic_machine=m68k-sun + ;; + sun4) + basic_machine=sparc-sun + ;; + sun386 | sun386i | roadrunner) + basic_machine=i386-sun + ;; + sv1) + basic_machine=sv1-cray + os=-unicos + ;; + 
symmetry) + basic_machine=i386-sequent + os=-dynix + ;; + t3e) + basic_machine=alphaev5-cray + os=-unicos + ;; + t90) + basic_machine=t90-cray + os=-unicos + ;; + tic54x | c54x*) + basic_machine=tic54x-unknown + os=-coff + ;; + tic55x | c55x*) + basic_machine=tic55x-unknown + os=-coff + ;; + tic6x | c6x*) + basic_machine=tic6x-unknown + os=-coff + ;; + tx39) + basic_machine=mipstx39-unknown + ;; + tx39el) + basic_machine=mipstx39el-unknown + ;; + toad1) + basic_machine=pdp10-xkl + os=-tops20 + ;; + tower | tower-32) + basic_machine=m68k-ncr + ;; + tpf) + basic_machine=s390x-ibm + os=-tpf + ;; + udi29k) + basic_machine=a29k-amd + os=-udi + ;; + ultra3) + basic_machine=a29k-nyu + os=-sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + os=-none + ;; + vaxv) + basic_machine=vax-dec + os=-sysv + ;; + vms) + basic_machine=vax-dec + os=-vms + ;; + vpp*|vx|vx-*) + basic_machine=f301-fujitsu + ;; + vxworks960) + basic_machine=i960-wrs + os=-vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + os=-vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + os=-vxworks + ;; + w65*) + basic_machine=w65-wdc + os=-none + ;; + w89k-*) + basic_machine=hppa1.1-winbond + os=-proelf + ;; + xbox) + basic_machine=i686-pc + os=-mingw32 + ;; + xps | xps100) + basic_machine=xps100-honeywell + ;; + ymp) + basic_machine=ymp-cray + os=-unicos + ;; + z8k-*-coff) + basic_machine=z8k-unknown + os=-sim + ;; + none) + basic_machine=none-none + os=-none + ;; + +# Here we handle the default manufacturer of certain CPU types. It is in +# some cases the only manufacturer, in others, it is the most popular. + w89k) + basic_machine=hppa1.1-winbond + ;; + op50n) + basic_machine=hppa1.1-oki + ;; + op60c) + basic_machine=hppa1.1-oki + ;; + romp) + basic_machine=romp-ibm + ;; + mmix) + basic_machine=mmix-knuth + ;; + rs6000) + basic_machine=rs6000-ibm + ;; + vax) + basic_machine=vax-dec + ;; + pdp10) + # there are many clones, so DEC is not a safe bet + basic_machine=pdp10-unknown + ;; + pdp11) + basic_machine=pdp11-dec + ;; + we32k) + basic_machine=we32k-att + ;; + sh[1234] | sh[24]a | sh[34]eb | sh[1234]le | sh[23]ele) + basic_machine=sh-unknown + ;; + sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) + basic_machine=sparc-sun + ;; + cydra) + basic_machine=cydra-cydrome + ;; + orion) + basic_machine=orion-highlevel + ;; + orion105) + basic_machine=clipper-highlevel + ;; + mac | mpw | mac-mpw) + basic_machine=m68k-apple + ;; + pmac | pmac-mpw) + basic_machine=powerpc-apple + ;; + *-unknown) + # Make sure to match an already-canonicalized machine name. + ;; + *) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; +esac + +# Here we canonicalize certain aliases for manufacturers. +case $basic_machine in + *-digital*) + basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` + ;; + *-commodore*) + basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` + ;; + *) + ;; +esac + +# Decode manufacturer-specific aliases for certain operating systems. + +if [ x"$os" != x"" ] +then +case $os in + # First match some system type aliases + # that might get confused with valid system types. + # -solaris* is a basic system type, with this one exception. + -solaris1 | -solaris1.*) + os=`echo $os | sed -e 's|solaris1|sunos4|'` + ;; + -solaris) + os=-solaris2 + ;; + -svr4*) + os=-sysv4 + ;; + -unixware*) + os=-sysv4.2uw + ;; + -gnu/linux*) + os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` + ;; + # First accept the basic system types. + # The portable systems comes first. 
+ # Each alternative MUST END IN A *, to match a version number. + # -sysv* is not here because it comes later, after sysvr4. + -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ + | -*vms* | -sco* | -esix* | -isc* | -aix* | -sunos | -sunos[34]*\ + | -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \ + | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ + | -aos* \ + | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ + | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ + | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ + | -openbsd* | -solidbsd* \ + | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ + | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ + | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ + | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ + | -chorusos* | -chorusrdb* \ + | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ + | -mingw32* | -linux-gnu* | -linux-newlib* | -linux-uclibc* \ + | -uxpv* | -beos* | -mpeix* | -udk* \ + | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ + | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ + | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ + | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ + | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ + | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ + | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -irx*) + # Remember, each alternative MUST END IN *, to match a version number. + ;; + -qnx*) + case $basic_machine in + x86-* | i*86-*) + ;; + *) + os=-nto$os + ;; + esac + ;; + -nto-qnx*) + ;; + -nto*) + os=`echo $os | sed -e 's|nto|nto-qnx|'` + ;; + -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ + | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ + | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) + ;; + -mac*) + os=`echo $os | sed -e 's|mac|macos|'` + ;; + -linux-dietlibc) + os=-linux-dietlibc + ;; + -linux*) + os=`echo $os | sed -e 's|linux|linux-gnu|'` + ;; + -sunos5*) + os=`echo $os | sed -e 's|sunos5|solaris2|'` + ;; + -sunos6*) + os=`echo $os | sed -e 's|sunos6|solaris3|'` + ;; + -opened*) + os=-openedition + ;; + -os400*) + os=-os400 + ;; + -wince*) + os=-wince + ;; + -osfrose*) + os=-osfrose + ;; + -osf*) + os=-osf + ;; + -utek*) + os=-bsd + ;; + -dynix*) + os=-bsd + ;; + -acis*) + os=-aos + ;; + -atheos*) + os=-atheos + ;; + -syllable*) + os=-syllable + ;; + -386bsd) + os=-bsd + ;; + -ctix* | -uts*) + os=-sysv + ;; + -nova*) + os=-rtmk-nova + ;; + -ns2 ) + os=-nextstep2 + ;; + -nsk*) + os=-nsk + ;; + # Preserve the version number of sinix5. + -sinix5.*) + os=`echo $os | sed -e 's|sinix|sysv|'` + ;; + -sinix*) + os=-sysv4 + ;; + -tpf*) + os=-tpf + ;; + -triton*) + os=-sysv3 + ;; + -oss*) + os=-sysv3 + ;; + -svr4) + os=-sysv4 + ;; + -svr3) + os=-sysv3 + ;; + -sysvr4) + os=-sysv4 + ;; + # This must come after -sysvr4. + -sysv*) + ;; + -ose*) + os=-ose + ;; + -es1800*) + os=-ose + ;; + -xenix) + os=-xenix + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + os=-mint + ;; + -aros*) + os=-aros + ;; + -kaos*) + os=-kaos + ;; + -zvmoe) + os=-zvmoe + ;; + -none) + ;; + *) + # Get rid of the `-' at the beginning of $os. + os=`echo $os | sed 's/[^-]*-//'` + echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 + exit 1 + ;; +esac +else + +# Here we handle the default operating systems that come with various machines. 
+# The value should be what the vendor currently ships out the door with their +# machine or put another way, the most popular os provided with the machine. + +# Note that if you're going to try to match "-MANUFACTURER" here (say, +# "-sun"), then you have to tell the case statement up towards the top +# that MANUFACTURER isn't an operating system. Otherwise, code above +# will signal an error saying that MANUFACTURER isn't an operating +# system, and we'll never get to this point. + +case $basic_machine in + score-*) + os=-elf + ;; + spu-*) + os=-elf + ;; + *-acorn) + os=-riscix1.2 + ;; + arm*-rebel) + os=-linux + ;; + arm*-semi) + os=-aout + ;; + c4x-* | tic4x-*) + os=-coff + ;; + # This must come before the *-dec entry. + pdp10-*) + os=-tops20 + ;; + pdp11-*) + os=-none + ;; + *-dec | vax-*) + os=-ultrix4.2 + ;; + m68*-apollo) + os=-domain + ;; + i386-sun) + os=-sunos4.0.2 + ;; + m68000-sun) + os=-sunos3 + # This also exists in the configure program, but was not the + # default. + # os=-sunos4 + ;; + m68*-cisco) + os=-aout + ;; + mep-*) + os=-elf + ;; + mips*-cisco) + os=-elf + ;; + mips*-*) + os=-elf + ;; + or32-*) + os=-coff + ;; + *-tti) # must be before sparc entry or we get the wrong os. + os=-sysv3 + ;; + sparc-* | *-sun) + os=-sunos4.1.1 + ;; + *-be) + os=-beos + ;; + *-haiku) + os=-haiku + ;; + *-ibm) + os=-aix + ;; + *-knuth) + os=-mmixware + ;; + *-wec) + os=-proelf + ;; + *-winbond) + os=-proelf + ;; + *-oki) + os=-proelf + ;; + *-hp) + os=-hpux + ;; + *-hitachi) + os=-hiux + ;; + i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) + os=-sysv + ;; + *-cbm) + os=-amigaos + ;; + *-dg) + os=-dgux + ;; + *-dolphin) + os=-sysv3 + ;; + m68k-ccur) + os=-rtu + ;; + m88k-omron*) + os=-luna + ;; + *-next ) + os=-nextstep + ;; + *-sequent) + os=-ptx + ;; + *-crds) + os=-unos + ;; + *-ns) + os=-genix + ;; + i370-*) + os=-mvs + ;; + *-next) + os=-nextstep3 + ;; + *-gould) + os=-sysv + ;; + *-highlevel) + os=-bsd + ;; + *-encore) + os=-bsd + ;; + *-sgi) + os=-irix + ;; + *-siemens) + os=-sysv4 + ;; + *-masscomp) + os=-rtu + ;; + f30[01]-fujitsu | f700-fujitsu) + os=-uxpv + ;; + *-rom68k) + os=-coff + ;; + *-*bug) + os=-coff + ;; + *-apple) + os=-macos + ;; + *-atari*) + os=-mint + ;; + *) + os=-none + ;; +esac +fi + +# Here we handle the case where we know the os, and the CPU type, but not the +# manufacturer. We pick the logical manufacturer. 
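(Illustrative sketch, not part of the patch.) The comment immediately above introduces the final canonicalization step, whose case statement follows: when the machine part came out as *-unknown, config.sub guesses a plausible vendor from the operating system. Two examples of the effect, based on mappings listed in that case statement (-aux picks apple, -riscix picks acorn); they are illustrative only, not output captured from a run.

# Vendor defaulting for names given without a manufacturer (illustrative only),
# per the *-unknown case statement that follows this comment.
./config.sub m68k-aux     # m68k -> m68k-unknown; -aux selects vendor apple
                          #   result: m68k-apple-aux
./config.sub arm-riscix   # arm -> arm-unknown; -riscix selects vendor acorn
                          #   result: arm-acorn-riscix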
+vendor=unknown +case $basic_machine in + *-unknown) + case $os in + -riscix*) + vendor=acorn + ;; + -sunos*) + vendor=sun + ;; + -aix*) + vendor=ibm + ;; + -beos*) + vendor=be + ;; + -hpux*) + vendor=hp + ;; + -mpeix*) + vendor=hp + ;; + -hiux*) + vendor=hitachi + ;; + -unos*) + vendor=crds + ;; + -dgux*) + vendor=dg + ;; + -luna*) + vendor=omron + ;; + -genix*) + vendor=ns + ;; + -mvs* | -opened*) + vendor=ibm + ;; + -os400*) + vendor=ibm + ;; + -ptx*) + vendor=sequent + ;; + -tpf*) + vendor=ibm + ;; + -vxsim* | -vxworks* | -windiss*) + vendor=wrs + ;; + -aux*) + vendor=apple + ;; + -hms*) + vendor=hitachi + ;; + -mpw* | -macos*) + vendor=apple + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + vendor=atari + ;; + -vos*) + vendor=stratus + ;; + esac + basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` + ;; +esac + +echo $basic_machine$os +exit + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/libraries/sqlite/unix/sqlite-3.5.1/configure b/libraries/sqlite/unix/sqlite-3.5.1/configure new file mode 100755 index 0000000000..c862e3ff51 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/configure @@ -0,0 +1,21144 @@ +#! /bin/sh +# Guess values for system-dependent variables and create Makefiles. +# Generated by GNU Autoconf 2.61. +# +# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, +# 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# This configure script is free software; the Free Software Foundation +# gives unlimited permission to copy, distribute and modify it. +## --------------------- ## +## M4sh Initialization. ## +## --------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in + *posix*) set -o posix ;; +esac + +fi + + + + +# PATH needs CR +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + echo "#! /bin/sh" >conf$$.sh + echo "exit 0" >>conf$$.sh + chmod +x conf$$.sh + if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then + PATH_SEPARATOR=';' + else + PATH_SEPARATOR=: + fi + rm -f conf$$.sh +fi + +# Support unset when possible. +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + as_unset=unset +else + as_unset=false +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +as_nl=' +' +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +case $0 in + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break +done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + { (exit 1); exit 1; } +fi + +# Work around bugs in pre-3.0 UWIN ksh. +for as_var in ENV MAIL MAILPATH +do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +for as_var in \ + LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \ + LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \ + LC_TELEPHONE LC_TIME +do + if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then + eval $as_var=C; export $as_var + else + ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var + fi +done + +# Required to use basename. +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + + +# Name of the executable. +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# CDPATH. +$as_unset CDPATH + + +if test "x$CONFIG_SHELL" = x; then + if (eval ":") 2>/dev/null; then + as_have_required=yes +else + as_have_required=no +fi + + if test $as_have_required = yes && (eval ": +(as_func_return () { + (exit \$1) +} +as_func_success () { + as_func_return 0 +} +as_func_failure () { + as_func_return 1 +} +as_func_ret_success () { + return 0 +} +as_func_ret_failure () { + return 1 +} + +exitcode=0 +if as_func_success; then + : +else + exitcode=1 + echo as_func_success failed. +fi + +if as_func_failure; then + exitcode=1 + echo as_func_failure succeeded. +fi + +if as_func_ret_success; then + : +else + exitcode=1 + echo as_func_ret_success failed. +fi + +if as_func_ret_failure; then + exitcode=1 + echo as_func_ret_failure succeeded. +fi + +if ( set x; as_func_ret_success y && test x = \"\$1\" ); then + : +else + exitcode=1 + echo positional parameters were not saved. +fi + +test \$exitcode = 0) || { (exit 1); exit 1; } + +( + as_lineno_1=\$LINENO + as_lineno_2=\$LINENO + test \"x\$as_lineno_1\" != \"x\$as_lineno_2\" && + test \"x\`expr \$as_lineno_1 + 1\`\" = \"x\$as_lineno_2\") || { (exit 1); exit 1; } +") 2> /dev/null; then + : +else + as_candidate_shells= + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + case $as_dir in + /*) + for as_base in sh bash ksh sh5; do + as_candidate_shells="$as_candidate_shells $as_dir/$as_base" + done;; + esac +done +IFS=$as_save_IFS + + + for as_shell in $as_candidate_shells $SHELL; do + # Try only shells that exist, to save several forks. + if { test -f "$as_shell" || test -f "$as_shell.exe"; } && + { ("$as_shell") 2> /dev/null <<\_ASEOF +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. 
+ alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in + *posix*) set -o posix ;; +esac + +fi + + +: +_ASEOF +}; then + CONFIG_SHELL=$as_shell + as_have_required=yes + if { "$as_shell" 2> /dev/null <<\_ASEOF +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in + *posix*) set -o posix ;; +esac + +fi + + +: +(as_func_return () { + (exit $1) +} +as_func_success () { + as_func_return 0 +} +as_func_failure () { + as_func_return 1 +} +as_func_ret_success () { + return 0 +} +as_func_ret_failure () { + return 1 +} + +exitcode=0 +if as_func_success; then + : +else + exitcode=1 + echo as_func_success failed. +fi + +if as_func_failure; then + exitcode=1 + echo as_func_failure succeeded. +fi + +if as_func_ret_success; then + : +else + exitcode=1 + echo as_func_ret_success failed. +fi + +if as_func_ret_failure; then + exitcode=1 + echo as_func_ret_failure succeeded. +fi + +if ( set x; as_func_ret_success y && test x = "$1" ); then + : +else + exitcode=1 + echo positional parameters were not saved. +fi + +test $exitcode = 0) || { (exit 1); exit 1; } + +( + as_lineno_1=$LINENO + as_lineno_2=$LINENO + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2") || { (exit 1); exit 1; } + +_ASEOF +}; then + break +fi + +fi + + done + + if test "x$CONFIG_SHELL" != x; then + for as_var in BASH_ENV ENV + do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var + done + export CONFIG_SHELL + exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"} +fi + + + if test $as_have_required = no; then + echo This script requires a shell more modern than all the + echo shells that I found on your system. Please install a + echo modern shell, or manually run the script under such a + echo shell if you do have one. + { (exit 1); exit 1; } +fi + + +fi + +fi + + + +(eval "as_func_return () { + (exit \$1) +} +as_func_success () { + as_func_return 0 +} +as_func_failure () { + as_func_return 1 +} +as_func_ret_success () { + return 0 +} +as_func_ret_failure () { + return 1 +} + +exitcode=0 +if as_func_success; then + : +else + exitcode=1 + echo as_func_success failed. +fi + +if as_func_failure; then + exitcode=1 + echo as_func_failure succeeded. +fi + +if as_func_ret_success; then + : +else + exitcode=1 + echo as_func_ret_success failed. +fi + +if as_func_ret_failure; then + exitcode=1 + echo as_func_ret_failure succeeded. +fi + +if ( set x; as_func_ret_success y && test x = \"\$1\" ); then + : +else + exitcode=1 + echo positional parameters were not saved. +fi + +test \$exitcode = 0") || { + echo No shell found that supports shell functions. + echo Please tell autoconf@gnu.org about your system, + echo including any error possibly output before this + echo message +} + + + + as_lineno_1=$LINENO + as_lineno_2=$LINENO + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || { + + # Create $as_me.lineno as a copy of $as_myself, but with $LINENO + # uniformly replaced by the line number. The first 'sed' inserts a + # line-number line after each line using $LINENO; the second 'sed' + # does the real work. 
The second script uses 'N' to pair each + # line-number line with the line containing $LINENO, and appends + # trailing '-' during substitution so that $LINENO is not a special + # case at line end. + # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the + # scripts with optimization help from Paolo Bonzini. Blame Lee + # E. McMahon (1931-1989) for sed's syntax. :-) + sed -n ' + p + /[$]LINENO/= + ' <$as_myself | + sed ' + s/[$]LINENO.*/&-/ + t lineno + b + :lineno + N + :loop + s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ + t loop + s/-\n.*// + ' >$as_me.lineno && + chmod +x "$as_me.lineno" || + { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 + { (exit 1); exit 1; }; } + + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensitive to this). + . "./$as_me.lineno" + # Exit status is that of the last command. + exit +} + + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in +-n*) + case `echo 'x\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + *) ECHO_C='\c';; + esac;; +*) + ECHO_N='-n';; +esac + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir +fi +echo >conf$$.file +if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -p'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -p' +elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln +else + as_ln_s='cp -p' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + +if mkdir -p . 2>/dev/null; then + as_mkdir_p=: +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +if test -x / >/dev/null 2>&1; then + as_test_x='test -x' +else + if ls -dL / >/dev/null 2>&1; then + as_ls_L_option=L + else + as_ls_L_option= + fi + as_test_x=' + eval sh -c '\'' + if test -d "$1"; then + test -d "$1/."; + else + case $1 in + -*)set "./$1";; + esac; + case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in + ???[sx]*):;;*)false;;esac;fi + '\'' sh + ' +fi +as_executable_p=$as_test_x + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + + + +# Check that we are running under the correct shell. +SHELL=${CONFIG_SHELL-/bin/sh} + +case X$ECHO in +X*--fallback-echo) + # Remove one level of quotation (which was required for Make). + ECHO=`echo "$ECHO" | sed 's,\\\\\$\\$0,'$0','` + ;; +esac + +echo=${ECHO-echo} +if test "X$1" = X--no-reexec; then + # Discard the --no-reexec flag, and continue. + shift +elif test "X$1" = X--fallback-echo; then + # Avoid inline document here, it may be left over + : +elif test "X`($echo '\t') 2>/dev/null`" = 'X\t' ; then + # Yippee, $echo works! + : +else + # Restart under the correct shell. 
+ exec $SHELL "$0" --no-reexec ${1+"$@"} +fi + +if test "X$1" = X--fallback-echo; then + # used as fallback echo + shift + cat </dev/null 2>&1 && unset CDPATH + +if test -z "$ECHO"; then +if test "X${echo_test_string+set}" != Xset; then +# find a string as large as possible, as long as the shell can cope with it + for cmd in 'sed 50q "$0"' 'sed 20q "$0"' 'sed 10q "$0"' 'sed 2q "$0"' 'echo test'; do + # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ... + if (echo_test_string=`eval $cmd`) 2>/dev/null && + echo_test_string=`eval $cmd` && + (test "X$echo_test_string" = "X$echo_test_string") 2>/dev/null + then + break + fi + done +fi + +if test "X`($echo '\t') 2>/dev/null`" = 'X\t' && + echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + : +else + # The Solaris, AIX, and Digital Unix default echo programs unquote + # backslashes. This makes it impossible to quote backslashes using + # echo "$something" | sed 's/\\/\\\\/g' + # + # So, first we look for a working echo in the user's PATH. + + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for dir in $PATH /usr/ucb; do + IFS="$lt_save_ifs" + if (test -f $dir/echo || test -f $dir/echo$ac_exeext) && + test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' && + echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + echo="$dir/echo" + break + fi + done + IFS="$lt_save_ifs" + + if test "X$echo" = Xecho; then + # We didn't find a better echo, so look for alternatives. + if test "X`(print -r '\t') 2>/dev/null`" = 'X\t' && + echo_testing_string=`(print -r "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + # This shell has a builtin print -r that does the trick. + echo='print -r' + elif (test -f /bin/ksh || test -f /bin/ksh$ac_exeext) && + test "X$CONFIG_SHELL" != X/bin/ksh; then + # If we have ksh, try running configure again with it. + ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh} + export ORIGINAL_CONFIG_SHELL + CONFIG_SHELL=/bin/ksh + export CONFIG_SHELL + exec $CONFIG_SHELL "$0" --no-reexec ${1+"$@"} + else + # Try using printf. + echo='printf %s\n' + if test "X`($echo '\t') 2>/dev/null`" = 'X\t' && + echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + # Cool, printf works + : + elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "$0" --fallback-echo '\t') 2>/dev/null` && + test "X$echo_testing_string" = 'X\t' && + echo_testing_string=`($ORIGINAL_CONFIG_SHELL "$0" --fallback-echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL + export CONFIG_SHELL + SHELL="$CONFIG_SHELL" + export SHELL + echo="$CONFIG_SHELL $0 --fallback-echo" + elif echo_testing_string=`($CONFIG_SHELL "$0" --fallback-echo '\t') 2>/dev/null` && + test "X$echo_testing_string" = 'X\t' && + echo_testing_string=`($CONFIG_SHELL "$0" --fallback-echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + echo="$CONFIG_SHELL $0 --fallback-echo" + else + # maybe with a smaller string... 
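The hunk above is libtool's search for an echo that leaves backslashes alone: the default echo on Solaris, AIX and Digital Unix interprets them, so the script probes plain echo, `print -r`, a ksh re-exec, and finally `printf %s\n`. A much-reduced sketch of that probe, keeping only the candidates that can be tested in place (the ksh re-exec and the large-test-string retry that continues below are omitted):

#!/bin/sh
# Pick an "echo" that does not interpret backslash escapes, falling back to
# printf when the shell builtin mangles them.
echo_cmd=
for candidate in 'echo' 'print -r' 'printf %s\n'; do
  if test "X`($candidate '\t') 2>/dev/null`" = 'X\t'; then
    echo_cmd=$candidate
    break
  fi
done
: "${echo_cmd:=echo}"            # last resort: live with plain echo

$echo_cmd 'result: \t is preserved'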
+ prev=: + + for cmd in 'echo test' 'sed 2q "$0"' 'sed 10q "$0"' 'sed 20q "$0"' 'sed 50q "$0"'; do + if (test "X$echo_test_string" = "X`eval $cmd`") 2>/dev/null + then + break + fi + prev="$cmd" + done + + if test "$prev" != 'sed 50q "$0"'; then + echo_test_string=`eval $prev` + export echo_test_string + exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "$0" ${1+"$@"} + else + # Oops. We lost completely, so just stick with echo. + echo=echo + fi + fi + fi + fi +fi +fi + +# Copy echo and quote the copy suitably for passing to libtool from +# the Makefile, instead of quoting the original, which is used later. +ECHO=$echo +if test "X$ECHO" = "X$CONFIG_SHELL $0 --fallback-echo"; then + ECHO="$CONFIG_SHELL \\\$\$0 --fallback-echo" +fi + + + + +tagnames=${tagnames+${tagnames},}CXX + +tagnames=${tagnames+${tagnames},}F77 + +exec 7<&0 &1 + +# Name of the host. +# hostname on some systems (SVR3.2, Linux) returns a bogus exit status, +# so uname gets run too. +ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` + +# +# Initializations. +# +ac_default_prefix=/usr/local +ac_clean_files= +ac_config_libobj_dir=. +LIBOBJS= +cross_compiling=no +subdirs= +MFLAGS= +MAKEFLAGS= +SHELL=${CONFIG_SHELL-/bin/sh} + +# Identity of this package. +PACKAGE_NAME= +PACKAGE_TARNAME= +PACKAGE_VERSION= +PACKAGE_STRING= +PACKAGE_BUGREPORT= + +ac_unique_file="src/sqlite.h.in" +# Factoring default headers for most tests. +ac_includes_default="\ +#include +#ifdef HAVE_SYS_TYPES_H +# include +#endif +#ifdef HAVE_SYS_STAT_H +# include +#endif +#ifdef STDC_HEADERS +# include +# include +#else +# ifdef HAVE_STDLIB_H +# include +# endif +#endif +#ifdef HAVE_STRING_H +# if !defined STDC_HEADERS && defined HAVE_MEMORY_H +# include +# endif +# include +#endif +#ifdef HAVE_STRINGS_H +# include +#endif +#ifdef HAVE_INTTYPES_H +# include +#endif +#ifdef HAVE_STDINT_H +# include +#endif +#ifdef HAVE_UNISTD_H +# include +#endif" + +ac_subst_vars='SHELL +PATH_SEPARATOR +PACKAGE_NAME +PACKAGE_TARNAME +PACKAGE_VERSION +PACKAGE_STRING +PACKAGE_BUGREPORT +exec_prefix +prefix +program_transform_name +bindir +sbindir +libexecdir +datarootdir +datadir +sysconfdir +sharedstatedir +localstatedir +includedir +oldincludedir +docdir +infodir +htmldir +dvidir +pdfdir +psdir +libdir +localedir +mandir +DEFS +ECHO_C +ECHO_N +ECHO_T +LIBS +build_alias +host_alias +target_alias +build +build_cpu +build_vendor +build_os +host +host_cpu +host_vendor +host_os +CC +CFLAGS +LDFLAGS +CPPFLAGS +ac_ct_CC +EXEEXT +OBJEXT +GREP +EGREP +LN_S +ECHO +AR +RANLIB +STRIP +CPP +CXX +CXXFLAGS +ac_ct_CXX +CXXCPP +F77 +FFLAGS +ac_ct_F77 +LIBTOOL +INSTALL_PROGRAM +INSTALL_SCRIPT +INSTALL_DATA +AWK +program_prefix +VERSION +RELEASE +VERSION_NUMBER +BUILD_CC +BUILD_CFLAGS +SQLITE_THREADSAFE +TARGET_THREAD_LIB +XTHREADCONNECT +THREADSOVERRIDELOCKS +ALLOWRELEASE +TEMP_STORE +BUILD_EXEEXT +OS_UNIX +OS_WIN +OS_OS2 +TARGET_EXEEXT +TCL_VERSION +TCL_BIN_DIR +TCL_SRC_DIR +TCL_LIBS +TCL_INCLUDE_SPEC +TCL_LIB_FILE +TCL_LIB_FLAG +TCL_LIB_SPEC +TCL_STUB_LIB_FILE +TCL_STUB_LIB_FLAG +TCL_STUB_LIB_SPEC +HAVE_TCL +TARGET_READLINE_LIBS +TARGET_READLINE_INC +TARGET_HAVE_READLINE +TARGET_DEBUG +LIBOBJS +LTLIBOBJS' +ac_subst_files='' + ac_precious_vars='build_alias +host_alias +target_alias +CC +CFLAGS +LDFLAGS +LIBS +CPPFLAGS +CPP +CXX +CXXFLAGS +CCC +CXXCPP +F77 +FFLAGS' + + +# Initialize some variables set by options. +ac_init_help= +ac_init_version=false +# The variables have the same names as the options, with +# dashes changed to underlines. 
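The comment above notes that each command-line option maps to a shell variable of the same name with dashes turned into underscores; the long case statement that follows implements this for every option. A small sketch of just the --enable-* form, showing the name validation and the eval'd assignment (the option names used here are only examples):

#!/bin/sh
# Map "--enable-foo[=val]" onto the shell variable "enable_foo", validating
# the name and defaulting the value to "yes", as the parser below does.
for option in --enable-threadsafe --enable-load-extension=no; do
  feature=`expr "x$option" : 'x--enable-\([^=]*\)'`
  case $option in
    *=*) value=`expr "x$option" : 'x[^=]*=\(.*\)'` ;;
    *)   value=yes ;;
  esac
  # Reject anything that would not survive as part of a variable name.
  case $feature in
    *[!-._a-zA-Z0-9]*) echo "invalid feature name: $feature" >&2; exit 1 ;;
  esac
  feature=`echo "$feature" | sed 's/[-.]/_/g'`
  eval "enable_$feature=\$value"
done

echo "enable_threadsafe=$enable_threadsafe"
echo "enable_load_extension=$enable_load_extension"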
+cache_file=/dev/null +exec_prefix=NONE +no_create= +no_recursion= +prefix=NONE +program_prefix=NONE +program_suffix=NONE +program_transform_name=s,x,x, +silent= +site= +srcdir= +verbose= +x_includes=NONE +x_libraries=NONE + +# Installation directory options. +# These are left unexpanded so users can "make install exec_prefix=/foo" +# and all the variables that are supposed to be based on exec_prefix +# by default will actually change. +# Use braces instead of parens because sh, perl, etc. also accept them. +# (The list follows the same order as the GNU Coding Standards.) +bindir='${exec_prefix}/bin' +sbindir='${exec_prefix}/sbin' +libexecdir='${exec_prefix}/libexec' +datarootdir='${prefix}/share' +datadir='${datarootdir}' +sysconfdir='${prefix}/etc' +sharedstatedir='${prefix}/com' +localstatedir='${prefix}/var' +includedir='${prefix}/include' +oldincludedir='/usr/include' +docdir='${datarootdir}/doc/${PACKAGE}' +infodir='${datarootdir}/info' +htmldir='${docdir}' +dvidir='${docdir}' +pdfdir='${docdir}' +psdir='${docdir}' +libdir='${exec_prefix}/lib' +localedir='${datarootdir}/locale' +mandir='${datarootdir}/man' + +ac_prev= +ac_dashdash= +for ac_option +do + # If the previous option needs an argument, assign it. + if test -n "$ac_prev"; then + eval $ac_prev=\$ac_option + ac_prev= + continue + fi + + case $ac_option in + *=*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; + *) ac_optarg=yes ;; + esac + + # Accept the important Cygnus configure options, so we can diagnose typos. + + case $ac_dashdash$ac_option in + --) + ac_dashdash=yes ;; + + -bindir | --bindir | --bindi | --bind | --bin | --bi) + ac_prev=bindir ;; + -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) + bindir=$ac_optarg ;; + + -build | --build | --buil | --bui | --bu) + ac_prev=build_alias ;; + -build=* | --build=* | --buil=* | --bui=* | --bu=*) + build_alias=$ac_optarg ;; + + -cache-file | --cache-file | --cache-fil | --cache-fi \ + | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) + ac_prev=cache_file ;; + -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ + | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) + cache_file=$ac_optarg ;; + + --config-cache | -C) + cache_file=config.cache ;; + + -datadir | --datadir | --datadi | --datad) + ac_prev=datadir ;; + -datadir=* | --datadir=* | --datadi=* | --datad=*) + datadir=$ac_optarg ;; + + -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ + | --dataroo | --dataro | --datar) + ac_prev=datarootdir ;; + -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ + | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) + datarootdir=$ac_optarg ;; + + -disable-* | --disable-*) + ac_feature=`expr "x$ac_option" : 'x-*disable-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_feature" : ".*[^-._$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid feature name: $ac_feature" >&2 + { (exit 1); exit 1; }; } + ac_feature=`echo $ac_feature | sed 's/[-.]/_/g'` + eval enable_$ac_feature=no ;; + + -docdir | --docdir | --docdi | --doc | --do) + ac_prev=docdir ;; + -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) + docdir=$ac_optarg ;; + + -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) + ac_prev=dvidir ;; + -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) + dvidir=$ac_optarg ;; + + -enable-* | --enable-*) + ac_feature=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` + # Reject names that are not valid shell variable names. 
+ expr "x$ac_feature" : ".*[^-._$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid feature name: $ac_feature" >&2 + { (exit 1); exit 1; }; } + ac_feature=`echo $ac_feature | sed 's/[-.]/_/g'` + eval enable_$ac_feature=\$ac_optarg ;; + + -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ + | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ + | --exec | --exe | --ex) + ac_prev=exec_prefix ;; + -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ + | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ + | --exec=* | --exe=* | --ex=*) + exec_prefix=$ac_optarg ;; + + -gas | --gas | --ga | --g) + # Obsolete; use --with-gas. + with_gas=yes ;; + + -help | --help | --hel | --he | -h) + ac_init_help=long ;; + -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) + ac_init_help=recursive ;; + -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) + ac_init_help=short ;; + + -host | --host | --hos | --ho) + ac_prev=host_alias ;; + -host=* | --host=* | --hos=* | --ho=*) + host_alias=$ac_optarg ;; + + -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) + ac_prev=htmldir ;; + -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ + | --ht=*) + htmldir=$ac_optarg ;; + + -includedir | --includedir | --includedi | --included | --include \ + | --includ | --inclu | --incl | --inc) + ac_prev=includedir ;; + -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ + | --includ=* | --inclu=* | --incl=* | --inc=*) + includedir=$ac_optarg ;; + + -infodir | --infodir | --infodi | --infod | --info | --inf) + ac_prev=infodir ;; + -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) + infodir=$ac_optarg ;; + + -libdir | --libdir | --libdi | --libd) + ac_prev=libdir ;; + -libdir=* | --libdir=* | --libdi=* | --libd=*) + libdir=$ac_optarg ;; + + -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ + | --libexe | --libex | --libe) + ac_prev=libexecdir ;; + -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ + | --libexe=* | --libex=* | --libe=*) + libexecdir=$ac_optarg ;; + + -localedir | --localedir | --localedi | --localed | --locale) + ac_prev=localedir ;; + -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) + localedir=$ac_optarg ;; + + -localstatedir | --localstatedir | --localstatedi | --localstated \ + | --localstate | --localstat | --localsta | --localst | --locals) + ac_prev=localstatedir ;; + -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ + | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) + localstatedir=$ac_optarg ;; + + -mandir | --mandir | --mandi | --mand | --man | --ma | --m) + ac_prev=mandir ;; + -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) + mandir=$ac_optarg ;; + + -nfp | --nfp | --nf) + # Obsolete; use --without-fp. 
+ with_fp=no ;; + + -no-create | --no-create | --no-creat | --no-crea | --no-cre \ + | --no-cr | --no-c | -n) + no_create=yes ;; + + -no-recursion | --no-recursion | --no-recursio | --no-recursi \ + | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) + no_recursion=yes ;; + + -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ + | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ + | --oldin | --oldi | --old | --ol | --o) + ac_prev=oldincludedir ;; + -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ + | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ + | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) + oldincludedir=$ac_optarg ;; + + -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) + ac_prev=prefix ;; + -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) + prefix=$ac_optarg ;; + + -program-prefix | --program-prefix | --program-prefi | --program-pref \ + | --program-pre | --program-pr | --program-p) + ac_prev=program_prefix ;; + -program-prefix=* | --program-prefix=* | --program-prefi=* \ + | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) + program_prefix=$ac_optarg ;; + + -program-suffix | --program-suffix | --program-suffi | --program-suff \ + | --program-suf | --program-su | --program-s) + ac_prev=program_suffix ;; + -program-suffix=* | --program-suffix=* | --program-suffi=* \ + | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) + program_suffix=$ac_optarg ;; + + -program-transform-name | --program-transform-name \ + | --program-transform-nam | --program-transform-na \ + | --program-transform-n | --program-transform- \ + | --program-transform | --program-transfor \ + | --program-transfo | --program-transf \ + | --program-trans | --program-tran \ + | --progr-tra | --program-tr | --program-t) + ac_prev=program_transform_name ;; + -program-transform-name=* | --program-transform-name=* \ + | --program-transform-nam=* | --program-transform-na=* \ + | --program-transform-n=* | --program-transform-=* \ + | --program-transform=* | --program-transfor=* \ + | --program-transfo=* | --program-transf=* \ + | --program-trans=* | --program-tran=* \ + | --progr-tra=* | --program-tr=* | --program-t=*) + program_transform_name=$ac_optarg ;; + + -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) + ac_prev=pdfdir ;; + -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) + pdfdir=$ac_optarg ;; + + -psdir | --psdir | --psdi | --psd | --ps) + ac_prev=psdir ;; + -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) + psdir=$ac_optarg ;; + + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + silent=yes ;; + + -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) + ac_prev=sbindir ;; + -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ + | --sbi=* | --sb=*) + sbindir=$ac_optarg ;; + + -sharedstatedir | --sharedstatedir | --sharedstatedi \ + | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ + | --sharedst | --shareds | --shared | --share | --shar \ + | --sha | --sh) + ac_prev=sharedstatedir ;; + -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ + | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ + | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ + | --sha=* | --sh=*) + sharedstatedir=$ac_optarg ;; + + -site | --site | --sit) + ac_prev=site ;; + -site=* | --site=* | --sit=*) + 
site=$ac_optarg ;; + + -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) + ac_prev=srcdir ;; + -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) + srcdir=$ac_optarg ;; + + -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ + | --syscon | --sysco | --sysc | --sys | --sy) + ac_prev=sysconfdir ;; + -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ + | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) + sysconfdir=$ac_optarg ;; + + -target | --target | --targe | --targ | --tar | --ta | --t) + ac_prev=target_alias ;; + -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) + target_alias=$ac_optarg ;; + + -v | -verbose | --verbose | --verbos | --verbo | --verb) + verbose=yes ;; + + -version | --version | --versio | --versi | --vers | -V) + ac_init_version=: ;; + + -with-* | --with-*) + ac_package=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_package" : ".*[^-._$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid package name: $ac_package" >&2 + { (exit 1); exit 1; }; } + ac_package=`echo $ac_package | sed 's/[-.]/_/g'` + eval with_$ac_package=\$ac_optarg ;; + + -without-* | --without-*) + ac_package=`expr "x$ac_option" : 'x-*without-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_package" : ".*[^-._$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid package name: $ac_package" >&2 + { (exit 1); exit 1; }; } + ac_package=`echo $ac_package | sed 's/[-.]/_/g'` + eval with_$ac_package=no ;; + + --x) + # Obsolete; use --with-x. + with_x=yes ;; + + -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ + | --x-incl | --x-inc | --x-in | --x-i) + ac_prev=x_includes ;; + -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ + | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) + x_includes=$ac_optarg ;; + + -x-libraries | --x-libraries | --x-librarie | --x-librari \ + | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) + ac_prev=x_libraries ;; + -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ + | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) + x_libraries=$ac_optarg ;; + + -*) { echo "$as_me: error: unrecognized option: $ac_option +Try \`$0 --help' for more information." >&2 + { (exit 1); exit 1; }; } + ;; + + *=*) + ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` + # Reject names that are not valid shell variable names. + expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid variable name: $ac_envvar" >&2 + { (exit 1); exit 1; }; } + eval $ac_envvar=\$ac_optarg + export $ac_envvar ;; + + *) + # FIXME: should be removed in autoconf 3.0. + echo "$as_me: WARNING: you should use --build, --host, --target" >&2 + expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && + echo "$as_me: WARNING: invalid host type: $ac_option" >&2 + : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option} + ;; + + esac +done + +if test -n "$ac_prev"; then + ac_option=--`echo $ac_prev | sed 's/_/-/g'` + { echo "$as_me: error: missing argument to $ac_option" >&2 + { (exit 1); exit 1; }; } +fi + +# Be sure to have absolute directory names. 
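The `*=*` branch just above is what lets invocations such as `./configure CC=gcc CFLAGS=-O2` work: the argument is split on the first '=', the name is checked to be a legal shell identifier, and the pair is assigned and exported. A stand-alone sketch of that handling (simplified; the real branch also records the assignment in $ac_configure_args):

#!/bin/sh
# Accept VAR=VALUE arguments, validate the name, assign and export the pair.
# Try: sh demo.sh CC=gcc 'CFLAGS=-g -O2'
for arg in "$@"; do
  case $arg in
    *=*)
      name=`expr "x$arg" : 'x\([^=]*\)='`
      value=`expr "x$arg" : 'x[^=]*=\(.*\)'`
      case $name in
        *[!_a-zA-Z0-9]* | [0-9]*)
          echo "invalid variable name: $name" >&2; exit 1 ;;
      esac
      eval "$name=\$value"
      export "$name"
      ;;
    *) echo "ignoring non-assignment argument: $arg" >&2 ;;
  esac
done

echo "CC=$CC CFLAGS=$CFLAGS"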
+for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ + datadir sysconfdir sharedstatedir localstatedir includedir \ + oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ + libdir localedir mandir +do + eval ac_val=\$$ac_var + case $ac_val in + [\\/$]* | ?:[\\/]* ) continue;; + NONE | '' ) case $ac_var in *prefix ) continue;; esac;; + esac + { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2 + { (exit 1); exit 1; }; } +done + +# There might be people who depend on the old broken behavior: `$host' +# used to hold the argument of --host etc. +# FIXME: To remove some day. +build=$build_alias +host=$host_alias +target=$target_alias + +# FIXME: To remove some day. +if test "x$host_alias" != x; then + if test "x$build_alias" = x; then + cross_compiling=maybe + echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host. + If a cross compiler is detected then cross compile mode will be used." >&2 + elif test "x$build_alias" != "x$host_alias"; then + cross_compiling=yes + fi +fi + +ac_tool_prefix= +test -n "$host_alias" && ac_tool_prefix=$host_alias- + +test "$silent" = yes && exec 6>/dev/null + + +ac_pwd=`pwd` && test -n "$ac_pwd" && +ac_ls_di=`ls -di .` && +ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || + { echo "$as_me: error: Working directory cannot be determined" >&2 + { (exit 1); exit 1; }; } +test "X$ac_ls_di" = "X$ac_pwd_ls_di" || + { echo "$as_me: error: pwd does not report name of working directory" >&2 + { (exit 1); exit 1; }; } + + +# Find the source files, if location was not specified. +if test -z "$srcdir"; then + ac_srcdir_defaulted=yes + # Try the directory containing this script, then the parent directory. + ac_confdir=`$as_dirname -- "$0" || +$as_expr X"$0" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$0" : 'X\(//\)[^/]' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +echo X"$0" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + srcdir=$ac_confdir + if test ! -r "$srcdir/$ac_unique_file"; then + srcdir=.. + fi +else + ac_srcdir_defaulted=no +fi +if test ! -r "$srcdir/$ac_unique_file"; then + test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." + { echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2 + { (exit 1); exit 1; }; } +fi +ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" +ac_abs_confdir=`( + cd "$srcdir" && test -r "./$ac_unique_file" || { echo "$as_me: error: $ac_msg" >&2 + { (exit 1); exit 1; }; } + pwd)` +# When building in place, set srcdir=. +if test "$ac_abs_confdir" = "$ac_pwd"; then + srcdir=. +fi +# Remove unnecessary trailing slashes from srcdir. +# Double slashes in file names in object file debugging info +# mess up M-x gdb in Emacs. +case $srcdir in +*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; +esac +for ac_var in $ac_precious_vars; do + eval ac_env_${ac_var}_set=\${${ac_var}+set} + eval ac_env_${ac_var}_value=\$${ac_var} + eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} + eval ac_cv_env_${ac_var}_value=\$${ac_var} +done + +# +# Report the --help message. +# +if test "$ac_init_help" = "long"; then + # Omit some internal or obsolete options to make the list less imposing. + # This message is too long to be a string in the A/UX 3.1 sh. + cat <<_ACEOF +\`configure' configures this package to adapt to many kinds of systems. + +Usage: $0 [OPTION]... 
[VAR=VALUE]... + +To assign environment variables (e.g., CC, CFLAGS...), specify them as +VAR=VALUE. See below for descriptions of some of the useful variables. + +Defaults for the options are specified in brackets. + +Configuration: + -h, --help display this help and exit + --help=short display options specific to this package + --help=recursive display the short help of all the included packages + -V, --version display version information and exit + -q, --quiet, --silent do not print \`checking...' messages + --cache-file=FILE cache test results in FILE [disabled] + -C, --config-cache alias for \`--cache-file=config.cache' + -n, --no-create do not create output files + --srcdir=DIR find the sources in DIR [configure dir or \`..'] + +Installation directories: + --prefix=PREFIX install architecture-independent files in PREFIX + [$ac_default_prefix] + --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX + [PREFIX] + +By default, \`make install' will install all the files in +\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify +an installation prefix other than \`$ac_default_prefix' using \`--prefix', +for instance \`--prefix=\$HOME'. + +For better control, use the options below. + +Fine tuning of the installation directories: + --bindir=DIR user executables [EPREFIX/bin] + --sbindir=DIR system admin executables [EPREFIX/sbin] + --libexecdir=DIR program executables [EPREFIX/libexec] + --sysconfdir=DIR read-only single-machine data [PREFIX/etc] + --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] + --localstatedir=DIR modifiable single-machine data [PREFIX/var] + --libdir=DIR object code libraries [EPREFIX/lib] + --includedir=DIR C header files [PREFIX/include] + --oldincludedir=DIR C header files for non-gcc [/usr/include] + --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] + --datadir=DIR read-only architecture-independent data [DATAROOTDIR] + --infodir=DIR info documentation [DATAROOTDIR/info] + --localedir=DIR locale-dependent data [DATAROOTDIR/locale] + --mandir=DIR man documentation [DATAROOTDIR/man] + --docdir=DIR documentation root [DATAROOTDIR/doc/PACKAGE] + --htmldir=DIR html documentation [DOCDIR] + --dvidir=DIR dvi documentation [DOCDIR] + --pdfdir=DIR pdf documentation [DOCDIR] + --psdir=DIR ps documentation [DOCDIR] +_ACEOF + + cat <<\_ACEOF + +System types: + --build=BUILD configure for building on BUILD [guessed] + --host=HOST cross-compile to build programs to run on HOST [BUILD] +_ACEOF +fi + +if test -n "$ac_init_help"; then + + cat <<\_ACEOF + +Optional Features: + --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) + --enable-FEATURE[=ARG] include FEATURE [ARG=yes] + --enable-shared[=PKGS] build shared libraries [default=yes] + --enable-static[=PKGS] build static libraries [default=yes] + --enable-fast-install[=PKGS] + optimize for fast installation [default=yes] + --disable-libtool-lock avoid locking (might break parallel builds) + --enable-threadsafe Support threadsafe operation + --enable-cross-thread-connections + Allow connection sharing across threads + --enable-threads-override-locks + Threads can override each others locks + --enable-releasemode Support libtool link to release mode + --enable-tempstore Use an in-ram database for temporary tables + (never,no,yes,always) + --disable-tcl do not build TCL extension + --disable-readline disable readline support [default=detect] + --enable-debug enable debugging & verbose explain + +Optional Packages: + 
--with-PACKAGE[=ARG] use PACKAGE [ARG=yes] + --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) + --with-gnu-ld assume the C compiler uses GNU ld [default=no] + --with-pic try to use only PIC/non-PIC objects [default=use + both] + --with-tags[=TAGS] include additional configurations [automatic] + --with-hints=FILE Read configuration options from FILE + --with-tcl=DIR directory containing tcl configuration + (tclConfig.sh) + --with-readline-lib specify readline library + --with-readline-inc specify readline include paths + +Some influential environment variables: + CC C compiler command + CFLAGS C compiler flags + LDFLAGS linker flags, e.g. -L if you have libraries in a + nonstandard directory + LIBS libraries to pass to the linker, e.g. -l + CPPFLAGS C/C++/Objective C preprocessor flags, e.g. -I if + you have headers in a nonstandard directory + CPP C preprocessor + CXX C++ compiler command + CXXFLAGS C++ compiler flags + CXXCPP C++ preprocessor + F77 Fortran 77 compiler command + FFLAGS Fortran 77 compiler flags + +Use these variables to override the choices made by `configure' or to help +it to find libraries and programs with nonstandard names/locations. + +_ACEOF +ac_status=$? +fi + +if test "$ac_init_help" = "recursive"; then + # If there are subdirs, report their specific --help. + for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue + test -d "$ac_dir" || continue + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,/..,g;s,/,,'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + cd "$ac_dir" || { ac_status=$?; continue; } + # Check for guested configure. + if test -f "$ac_srcdir/configure.gnu"; then + echo && + $SHELL "$ac_srcdir/configure.gnu" --help=recursive + elif test -f "$ac_srcdir/configure"; then + echo && + $SHELL "$ac_srcdir/configure" --help=recursive + else + echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 + fi || ac_status=$? + cd "$ac_pwd" || { ac_status=$?; break; } + done +fi + +test -n "$ac_init_help" && exit $ac_status +if $ac_init_version; then + cat <<\_ACEOF +configure +generated by GNU Autoconf 2.61 + +Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, +2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +This configure script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it. +_ACEOF + exit +fi +cat >config.log <<_ACEOF +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. 
+ +It was created by $as_me, which was +generated by GNU Autoconf 2.61. Invocation command line was + + $ $0 $@ + +_ACEOF +exec 5>>config.log +{ +cat <<_ASUNAME +## --------- ## +## Platform. ## +## --------- ## + +hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` + +/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` +/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` +/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` +/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` + +_ASUNAME + +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + echo "PATH: $as_dir" +done +IFS=$as_save_IFS + +} >&5 + +cat >&5 <<_ACEOF + + +## ----------- ## +## Core tests. ## +## ----------- ## + +_ACEOF + + +# Keep a trace of the command line. +# Strip out --no-create and --no-recursion so they do not pile up. +# Strip out --silent because we don't want to record it for future runs. +# Also quote any args containing shell meta-characters. +# Make two passes to allow for proper duplicate-argument suppression. +ac_configure_args= +ac_configure_args0= +ac_configure_args1= +ac_must_keep_next=false +for ac_pass in 1 2 +do + for ac_arg + do + case $ac_arg in + -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + continue ;; + *\'*) + ac_arg=`echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + case $ac_pass in + 1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;; + 2) + ac_configure_args1="$ac_configure_args1 '$ac_arg'" + if test $ac_must_keep_next = true; then + ac_must_keep_next=false # Got value, back to normal. + else + case $ac_arg in + *=* | --config-cache | -C | -disable-* | --disable-* \ + | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ + | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ + | -with-* | --with-* | -without-* | --without-* | --x) + case "$ac_configure_args0 " in + "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; + esac + ;; + -* ) ac_must_keep_next=true ;; + esac + fi + ac_configure_args="$ac_configure_args '$ac_arg'" + ;; + esac + done +done +$as_unset ac_configure_args0 || test "${ac_configure_args0+set}" != set || { ac_configure_args0=; export ac_configure_args0; } +$as_unset ac_configure_args1 || test "${ac_configure_args1+set}" != set || { ac_configure_args1=; export ac_configure_args1; } + +# When interrupted or exit'd, cleanup temporary files, and complete +# config.log. We remove comments because anyway the quotes in there +# would cause problems or look ugly. +# WARNING: Use '\'' to represent an apostrophe within the trap. +# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. +trap 'exit_status=$? + # Save into config.log some information that might help in debugging. + { + echo + + cat <<\_ASBOX +## ---------------- ## +## Cache variables. 
## +## ---------------- ## +_ASBOX + echo + # The following way of writing the cache mishandles newlines in values, +( + for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { echo "$as_me:$LINENO: WARNING: Cache variable $ac_var contains a newline." >&5 +echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + *) $as_unset $ac_var ;; + esac ;; + esac + done + (set) 2>&1 | + case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + sed -n \ + "s/'\''/'\''\\\\'\'''\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" + ;; #( + *) + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) + echo + + cat <<\_ASBOX +## ----------------- ## +## Output variables. ## +## ----------------- ## +_ASBOX + echo + for ac_var in $ac_subst_vars + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + echo "$ac_var='\''$ac_val'\''" + done | sort + echo + + if test -n "$ac_subst_files"; then + cat <<\_ASBOX +## ------------------- ## +## File substitutions. ## +## ------------------- ## +_ASBOX + echo + for ac_var in $ac_subst_files + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + echo "$ac_var='\''$ac_val'\''" + done | sort + echo + fi + + if test -s confdefs.h; then + cat <<\_ASBOX +## ----------- ## +## confdefs.h. ## +## ----------- ## +_ASBOX + echo + cat confdefs.h + echo + fi + test "$ac_signal" != 0 && + echo "$as_me: caught signal $ac_signal" + echo "$as_me: exit $exit_status" + } >&5 + rm -f core *.core core.conftest.* && + rm -f -r conftest* confdefs* conf$$* $ac_clean_files && + exit $exit_status +' 0 +for ac_signal in 1 2 13 15; do + trap 'ac_signal='$ac_signal'; { (exit 1); exit 1; }' $ac_signal +done +ac_signal=0 + +# confdefs.h avoids OS command line length limits that DEFS can exceed. +rm -f -r conftest* confdefs.h + +# Predefined preprocessor variables. + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_NAME "$PACKAGE_NAME" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_TARNAME "$PACKAGE_TARNAME" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_VERSION "$PACKAGE_VERSION" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_STRING "$PACKAGE_STRING" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" +_ACEOF + + +# Let the site file select an alternate cache file if it wants to. +# Prefer explicitly selected file to automatically selected ones. +if test -n "$CONFIG_SITE"; then + set x "$CONFIG_SITE" +elif test "x$prefix" != xNONE; then + set x "$prefix/share/config.site" "$prefix/etc/config.site" +else + set x "$ac_default_prefix/share/config.site" \ + "$ac_default_prefix/etc/config.site" +fi +shift +for ac_site_file +do + if test -r "$ac_site_file"; then + { echo "$as_me:$LINENO: loading site script $ac_site_file" >&5 +echo "$as_me: loading site script $ac_site_file" >&6;} + sed 's/^/| /' "$ac_site_file" >&5 + . "$ac_site_file" + fi +done + +if test -r "$cache_file"; then + # Some versions of bash will fail to source /dev/null (special + # files actually), so we avoid doing that. 
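The trap block above is how configure guarantees that config.log is completed and temporary files are removed no matter how the script ends, including on HUP, INT, PIPE and TERM. A minimal sketch of the same arrangement; the log and scratch file names are invented, and the real trap additionally dumps the cache and output variables:

#!/bin/sh
# On exit, or on a fatal signal, append a short post-mortem to the log and
# clean up scratch files.
log=demo.log
: >"$log"
exec 5>>"$log"                   # fd 5 collects diagnostics, like config.log

trap 'status=$?
  echo "## exit status: $status" >&5
  test "$sig" = 0 || echo "## caught signal $sig" >&5
  rm -f demo.tmp*' 0
for sig in 1 2 13 15; do
  trap 'sig='$sig'; exit 1' $sig
done
sig=0

echo "doing some work" >&5
echo scratch >demo.tmp.1
exit 0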
+ if test -f "$cache_file"; then + { echo "$as_me:$LINENO: loading cache $cache_file" >&5 +echo "$as_me: loading cache $cache_file" >&6;} + case $cache_file in + [\\/]* | ?:[\\/]* ) . "$cache_file";; + *) . "./$cache_file";; + esac + fi +else + { echo "$as_me:$LINENO: creating cache $cache_file" >&5 +echo "$as_me: creating cache $cache_file" >&6;} + >$cache_file +fi + +# Check that the precious variables saved in the cache have kept the same +# value. +ac_cache_corrupted=false +for ac_var in $ac_precious_vars; do + eval ac_old_set=\$ac_cv_env_${ac_var}_set + eval ac_new_set=\$ac_env_${ac_var}_set + eval ac_old_val=\$ac_cv_env_${ac_var}_value + eval ac_new_val=\$ac_env_${ac_var}_value + case $ac_old_set,$ac_new_set in + set,) + { echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 +echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,set) + { echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5 +echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,);; + *) + if test "x$ac_old_val" != "x$ac_new_val"; then + { echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5 +echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} + { echo "$as_me:$LINENO: former value: $ac_old_val" >&5 +echo "$as_me: former value: $ac_old_val" >&2;} + { echo "$as_me:$LINENO: current value: $ac_new_val" >&5 +echo "$as_me: current value: $ac_new_val" >&2;} + ac_cache_corrupted=: + fi;; + esac + # Pass precious variables to config.status. + if test "$ac_new_set" = set; then + case $ac_new_val in + *\'*) ac_arg=$ac_var=`echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; + *) ac_arg=$ac_var=$ac_new_val ;; + esac + case " $ac_configure_args " in + *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. + *) ac_configure_args="$ac_configure_args '$ac_arg'" ;; + esac + fi +done +if $ac_cache_corrupted; then + { echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5 +echo "$as_me: error: changes in the environment can compromise the build" >&2;} + { { echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5 +echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;} + { (exit 1); exit 1; }; } +fi + + + + + + + + + + + + + + + + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + +# The following RCS revision string applies to configure.in +# $Revision: 1.43 $ + +######### +# Programs needed +# +# Check whether --enable-shared was given. +if test "${enable_shared+set}" = set; then + enableval=$enable_shared; p=${PACKAGE-default} + case $enableval in + yes) enable_shared=yes ;; + no) enable_shared=no ;; + *) + enable_shared=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_shared=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + enable_shared=yes +fi + + +# Check whether --enable-static was given. 
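The loop above compares each "precious" variable (CC, CFLAGS, ...) against the value recorded in the cache from a previous run and refuses to continue if they differ, since a stale cache would otherwise poison the build. A reduced sketch of that check for just CC and CFLAGS, with an invented cache file name; run it twice with different CC values to see the refusal:

#!/bin/sh
# Record CC/CFLAGS on the first run; refuse a later run whose values differ.
# Try: CC=gcc sh demo.sh; then CC=clang sh demo.sh
cache=demo.cache
corrupted=false

if test -r "$cache"; then
  . "./$cache"
  for var in CC CFLAGS; do
    eval old=\"\$ac_cv_env_${var}_value\"
    eval new=\"\$$var\"
    if test "x$old" != "x$new"; then
      echo "$var has changed since the previous run: '$old' -> '$new'" >&2
      corrupted=true
    fi
  done
else
  echo "ac_cv_env_CC_value='$CC'"         >"$cache"
  echo "ac_cv_env_CFLAGS_value='$CFLAGS'" >>"$cache"
fi

if $corrupted; then
  echo "changes in the environment can compromise the build" >&2
  echo "remove $cache and start over" >&2
  exit 1
fi
echo "environment is consistent with $cache"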
+if test "${enable_static+set}" = set; then + enableval=$enable_static; p=${PACKAGE-default} + case $enableval in + yes) enable_static=yes ;; + no) enable_static=no ;; + *) + enable_static=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_static=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + enable_static=yes +fi + + +# Check whether --enable-fast-install was given. +if test "${enable_fast_install+set}" = set; then + enableval=$enable_fast_install; p=${PACKAGE-default} + case $enableval in + yes) enable_fast_install=yes ;; + no) enable_fast_install=no ;; + *) + enable_fast_install=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_fast_install=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + enable_fast_install=yes +fi + + +ac_aux_dir= +for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do + if test -f "$ac_dir/install-sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install-sh -c" + break + elif test -f "$ac_dir/install.sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install.sh -c" + break + elif test -f "$ac_dir/shtool"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/shtool install -c" + break + fi +done +if test -z "$ac_aux_dir"; then + { { echo "$as_me:$LINENO: error: cannot find install-sh or install.sh in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" >&5 +echo "$as_me: error: cannot find install-sh or install.sh in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" >&2;} + { (exit 1); exit 1; }; } +fi + +# These three variables are undocumented and unsupported, +# and are intended to be withdrawn in a future Autoconf release. +# They can cause serious problems if a builder's source tree is in a directory +# whose full name contains unusual characters. +ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. +ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. +ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. + + +# Make sure we can run config.sub. +$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || + { { echo "$as_me:$LINENO: error: cannot run $SHELL $ac_aux_dir/config.sub" >&5 +echo "$as_me: error: cannot run $SHELL $ac_aux_dir/config.sub" >&2;} + { (exit 1); exit 1; }; } + +{ echo "$as_me:$LINENO: checking build system type" >&5 +echo $ECHO_N "checking build system type... 
$ECHO_C" >&6; } +if test "${ac_cv_build+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_build_alias=$build_alias +test "x$ac_build_alias" = x && + ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` +test "x$ac_build_alias" = x && + { { echo "$as_me:$LINENO: error: cannot guess build type; you must specify one" >&5 +echo "$as_me: error: cannot guess build type; you must specify one" >&2;} + { (exit 1); exit 1; }; } +ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || + { { echo "$as_me:$LINENO: error: $SHELL $ac_aux_dir/config.sub $ac_build_alias failed" >&5 +echo "$as_me: error: $SHELL $ac_aux_dir/config.sub $ac_build_alias failed" >&2;} + { (exit 1); exit 1; }; } + +fi +{ echo "$as_me:$LINENO: result: $ac_cv_build" >&5 +echo "${ECHO_T}$ac_cv_build" >&6; } +case $ac_cv_build in +*-*-*) ;; +*) { { echo "$as_me:$LINENO: error: invalid value of canonical build" >&5 +echo "$as_me: error: invalid value of canonical build" >&2;} + { (exit 1); exit 1; }; };; +esac +build=$ac_cv_build +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_build +shift +build_cpu=$1 +build_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +build_os=$* +IFS=$ac_save_IFS +case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac + + +{ echo "$as_me:$LINENO: checking host system type" >&5 +echo $ECHO_N "checking host system type... $ECHO_C" >&6; } +if test "${ac_cv_host+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test "x$host_alias" = x; then + ac_cv_host=$ac_cv_build +else + ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || + { { echo "$as_me:$LINENO: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&5 +echo "$as_me: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&2;} + { (exit 1); exit 1; }; } +fi + +fi +{ echo "$as_me:$LINENO: result: $ac_cv_host" >&5 +echo "${ECHO_T}$ac_cv_host" >&6; } +case $ac_cv_host in +*-*-*) ;; +*) { { echo "$as_me:$LINENO: error: invalid value of canonical host" >&5 +echo "$as_me: error: invalid value of canonical host" >&2;} + { (exit 1); exit 1; }; };; +esac +host=$ac_cv_host +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_host +shift +host_cpu=$1 +host_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +host_os=$* +IFS=$ac_save_IFS +case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. +set dummy ${ac_tool_prefix}gcc; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_CC="${ac_tool_prefix}gcc" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { echo "$as_me:$LINENO: result: $CC" >&5 +echo "${ECHO_T}$CC" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "gcc", so it can be a program name with args. +set dummy gcc; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_ac_ct_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_CC="gcc" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 +echo "${ECHO_T}$ac_ct_CC" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&5 +echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +else + CC="$ac_cv_prog_CC" +fi + +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. +set dummy ${ac_tool_prefix}cc; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_CC="${ac_tool_prefix}cc" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { echo "$as_me:$LINENO: result: $CC" >&5 +echo "${ECHO_T}$CC" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + + fi +fi +if test -z "$CC"; then + # Extract the first word of "cc", so it can be a program name with args. 
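The repeated hunks above all follow the same pattern for locating a compiler: walk $PATH, split on $PATH_SEPARATOR, and accept the first directory holding an executable of the wanted name, caching the answer. A compact sketch of that search without the caching or the $ac_executable_extensions loop:

#!/bin/sh
# Walk $PATH and return the first executable with the requested name.
find_prog() {
  name=$1
  save_IFS=$IFS
  IFS=${PATH_SEPARATOR-:}
  for dir in $PATH; do
    IFS=$save_IFS
    test -z "$dir" && dir=.
    if test -f "$dir/$name" && test -x "$dir/$name"; then
      echo "$dir/$name"
      return 0
    fi
  done
  IFS=$save_IFS
  return 1
}

CC=`find_prog gcc` || CC=`find_prog cc` || CC=
test -n "$CC" || { echo "no acceptable C compiler found in \$PATH" >&2; exit 1; }
echo "using CC=$CC"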
+set dummy cc; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else + ac_prog_rejected=no +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then + ac_prog_rejected=yes + continue + fi + ac_cv_prog_CC="cc" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +if test $ac_prog_rejected = yes; then + # We found a bogon in the path, so make sure we never use it. + set dummy $ac_cv_prog_CC + shift + if test $# != 0; then + # We chose a different compiler from the bogus one. + # However, it has the same basename, so the bogon will be chosen + # first if we set CC to just the basename; use the full file name. + shift + ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" + fi +fi +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { echo "$as_me:$LINENO: result: $CC" >&5 +echo "${ECHO_T}$CC" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + +fi +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + for ac_prog in cl.exe + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_CC="$ac_tool_prefix$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { echo "$as_me:$LINENO: result: $CC" >&5 +echo "${ECHO_T}$CC" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + + test -n "$CC" && break + done +fi +if test -z "$CC"; then + ac_ct_CC=$CC + for ac_prog in cl.exe +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_ac_ct_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_CC="$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 +echo "${ECHO_T}$ac_ct_CC" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + + test -n "$ac_ct_CC" && break +done + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&5 +echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +fi + +fi + + +test -z "$CC" && { { echo "$as_me:$LINENO: error: no acceptable C compiler found in \$PATH +See \`config.log' for more details." >&5 +echo "$as_me: error: no acceptable C compiler found in \$PATH +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } + +# Provide some information about the compiler. +echo "$as_me:$LINENO: checking for C compiler version" >&5 +ac_compiler=`set X $ac_compile; echo $2` +{ (ac_try="$ac_compiler --version >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compiler --version >&5") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (ac_try="$ac_compiler -v >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compiler -v >&5") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (ac_try="$ac_compiler -V >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compiler -V >&5") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files a.out a.exe b.out" +# Try to create an executable without -o first, disregard a.out. +# It will help us diagnose broken compilers, and finding out an intuition +# of exeext. +{ echo "$as_me:$LINENO: checking for C compiler default output file name" >&5 +echo $ECHO_N "checking for C compiler default output file name... $ECHO_C" >&6; } +ac_link_default=`echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` +# +# List of possible output files, starting from the most likely. +# The algorithm is not robust to junk in `.', hence go to wildcards (a.*) +# only as a last resort. b.out is created by i960 compilers. 
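The comments above explain the strategy for finding the executable suffix: link a trivial program, then look at which output files actually appeared, trying the likeliest names first and treating whatever follows the first dot as EXEEXT. A reduced sketch of that probe; it links with -o rather than relying on the compiler's default output name, and assumes $CC is already known to work:

#!/bin/sh
# Link a trivial program and inspect the produced file name to learn the
# executable suffix (empty on Unix, ".exe" with Windows-style toolchains).
CC=${CC-cc}
cat >conftest.c <<'EOF'
int main (void) { return 0; }
EOF

exeext=
if $CC -o conftest conftest.c >/dev/null 2>&1; then
  for file in conftest.exe conftest conftest.*; do
    test -f "$file" || continue
    case $file in
      *.c | *.o | *.obj) ;;                               # ignore inputs/objects
      *.*) exeext=`expr "$file" : '[^.]*\(\..*\)'`; break ;;
      *)   break ;;                                       # plain "conftest": no suffix
    esac
  done
fi
rm -f conftest conftest.c conftest.o conftest.exe
echo "executable suffix: '${exeext}'"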
+ac_files='a_out.exe a.exe conftest.exe a.out conftest a.* conftest.* b.out' +# +# The IRIX 6 linker writes into existing files which may not be +# executable, retaining their permissions. Remove them first so a +# subsequent execution test works. +ac_rmfiles= +for ac_file in $ac_files +do + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.o | *.obj ) ;; + * ) ac_rmfiles="$ac_rmfiles $ac_file";; + esac +done +rm -f $ac_rmfiles + +if { (ac_try="$ac_link_default" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link_default") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. +# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' +# in a Makefile. We should not override ac_cv_exeext if it was cached, +# so that the user can short-circuit this test for compilers unknown to +# Autoconf. +for ac_file in $ac_files '' +do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.o | *.obj ) + ;; + [ab].out ) + # We found the default executable, but exeext='' is most + # certainly right. + break;; + *.* ) + if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; + then :; else + ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + fi + # We set ac_cv_exeext here because the later test for it is not + # safe: cross compilers may not add the suffix if given an `-o' + # argument, so we may need to know it at that point already. + # Even if this section looks crufty: it has the advantage of + # actually working. + break;; + * ) + break;; + esac +done +test "$ac_cv_exeext" = no && ac_cv_exeext= + +else + ac_file='' +fi + +{ echo "$as_me:$LINENO: result: $ac_file" >&5 +echo "${ECHO_T}$ac_file" >&6; } +if test -z "$ac_file"; then + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { echo "$as_me:$LINENO: error: C compiler cannot create executables +See \`config.log' for more details." >&5 +echo "$as_me: error: C compiler cannot create executables +See \`config.log' for more details." >&2;} + { (exit 77); exit 77; }; } +fi + +ac_exeext=$ac_cv_exeext + +# Check that the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +{ echo "$as_me:$LINENO: checking whether the C compiler works" >&5 +echo $ECHO_N "checking whether the C compiler works... $ECHO_C" >&6; } +# FIXME: These cross compiler hacks should be removed for Autoconf 3.0 +# If not cross compiling, check that we can run a simple program. +if test "$cross_compiling" != yes; then + if { ac_try='./$ac_file' + { (case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + cross_compiling=no + else + if test "$cross_compiling" = maybe; then + cross_compiling=yes + else + { { echo "$as_me:$LINENO: error: cannot run C compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot run C compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details." 
>&2;} + { (exit 1); exit 1; }; } + fi + fi +fi +{ echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6; } + +rm -f a.out a.exe conftest$ac_cv_exeext b.out +ac_clean_files=$ac_clean_files_save +# Check that the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +{ echo "$as_me:$LINENO: checking whether we are cross compiling" >&5 +echo $ECHO_N "checking whether we are cross compiling... $ECHO_C" >&6; } +{ echo "$as_me:$LINENO: result: $cross_compiling" >&5 +echo "${ECHO_T}$cross_compiling" >&6; } + +{ echo "$as_me:$LINENO: checking for suffix of executables" >&5 +echo $ECHO_N "checking for suffix of executables... $ECHO_C" >&6; } +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + # If both `conftest.exe' and `conftest' are `present' (well, observable) +# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will +# work properly (i.e., refer to `conftest.exe'), while it won't with +# `rm'. +for ac_file in conftest.exe conftest conftest.*; do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.o | *.obj ) ;; + *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + break;; + * ) break;; + esac +done +else + { { echo "$as_me:$LINENO: error: cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } +fi + +rm -f conftest$ac_cv_exeext +{ echo "$as_me:$LINENO: result: $ac_cv_exeext" >&5 +echo "${ECHO_T}$ac_cv_exeext" >&6; } + +rm -f conftest.$ac_ext +EXEEXT=$ac_cv_exeext +ac_exeext=$EXEEXT +{ echo "$as_me:$LINENO: checking for suffix of object files" >&5 +echo $ECHO_N "checking for suffix of object files... $ECHO_C" >&6; } +if test "${ac_cv_objext+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.o conftest.obj +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + for ac_file in conftest.o conftest.obj conftest.*; do + test -f "$ac_file" || continue; + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf ) ;; + *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` + break;; + esac +done +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { echo "$as_me:$LINENO: error: cannot compute suffix of object files: cannot compile +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute suffix of object files: cannot compile +See \`config.log' for more details." 
>&2;} + { (exit 1); exit 1; }; } +fi + +rm -f conftest.$ac_cv_objext conftest.$ac_ext +fi +{ echo "$as_me:$LINENO: result: $ac_cv_objext" >&5 +echo "${ECHO_T}$ac_cv_objext" >&6; } +OBJEXT=$ac_cv_objext +ac_objext=$OBJEXT +{ echo "$as_me:$LINENO: checking whether we are using the GNU C compiler" >&5 +echo $ECHO_N "checking whether we are using the GNU C compiler... $ECHO_C" >&6; } +if test "${ac_cv_c_compiler_gnu+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_compiler_gnu=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_compiler_gnu=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_c_compiler_gnu=$ac_compiler_gnu + +fi +{ echo "$as_me:$LINENO: result: $ac_cv_c_compiler_gnu" >&5 +echo "${ECHO_T}$ac_cv_c_compiler_gnu" >&6; } +GCC=`test $ac_compiler_gnu = yes && echo yes` +ac_test_CFLAGS=${CFLAGS+set} +ac_save_CFLAGS=$CFLAGS +{ echo "$as_me:$LINENO: checking whether $CC accepts -g" >&5 +echo $ECHO_N "checking whether $CC accepts -g... $ECHO_C" >&6; } +if test "${ac_cv_prog_cc_g+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_save_c_werror_flag=$ac_c_werror_flag + ac_c_werror_flag=yes + ac_cv_prog_cc_g=no + CFLAGS="-g" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_prog_cc_g=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + CFLAGS="" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err
+ } && test -s conftest.$ac_objext; then
+ :
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_c_werror_flag=$ac_save_c_werror_flag
+ CFLAGS="-g"
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_prog_cc_g=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_c_werror_flag=$ac_save_c_werror_flag
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_prog_cc_g" >&5
+echo "${ECHO_T}$ac_cv_prog_cc_g" >&6; }
+if test "$ac_test_CFLAGS" = set; then
+ CFLAGS=$ac_save_CFLAGS
+elif test $ac_cv_prog_cc_g = yes; then
+ if test "$GCC" = yes; then
+ CFLAGS="-g -O2"
+ else
+ CFLAGS="-g"
+ fi
+else
+ if test "$GCC" = yes; then
+ CFLAGS="-O2"
+ else
+ CFLAGS=
+ fi
+fi
+{ echo "$as_me:$LINENO: checking for $CC option to accept ISO C89" >&5
+echo $ECHO_N "checking for $CC option to accept ISO C89... $ECHO_C" >&6; }
+if test "${ac_cv_prog_cc_c89+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_cv_prog_cc_c89=no
+ac_save_CC=$CC
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdarg.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */
+struct buf { int x; };
+FILE * (*rcsopen) (struct buf *, struct stat *, int);
+static char *e (p, i)
+ char **p;
+ int i;
+{
+ return p[i];
+}
+static char *f (char * (*g) (char **, int), char **p, ...)
+{
+ char *s;
+ va_list v;
+ va_start (v,p);
+ s = g (p, va_arg (v,int));
+ va_end (v);
+ return s;
+}
+
+/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has
+ function prototypes and stuff, but not '\xHH' hex character constants.
+ These don't provoke an error unfortunately, instead are silently treated
+ as 'x'. The following induces an error, until -std is added to get
+ proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an
+ array size at least. It's necessary to write '\x00'==0 to get something
+ that's true only with -std. */
+int osf4_cc_array ['\x00' == 0 ? 1 : -1];
+
+/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
+ inside strings and character constants. */
+#define FOO(x) 'x'
+int xlc6_cc_array[FOO(a) == 'x' ?
1 : -1]; + +int test (int i, double x); +struct s1 {int (*f) (int a);}; +struct s2 {int (*f) (double a);}; +int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); +int argc; +char **argv; +int +main () +{ +return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; + ; + return 0; +} +_ACEOF +for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ + -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" +do + CC="$ac_save_CC $ac_arg" + rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_prog_cc_c89=$ac_arg +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + +fi + +rm -f core conftest.err conftest.$ac_objext + test "x$ac_cv_prog_cc_c89" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC + +fi +# AC_CACHE_VAL +case "x$ac_cv_prog_cc_c89" in + x) + { echo "$as_me:$LINENO: result: none needed" >&5 +echo "${ECHO_T}none needed" >&6; } ;; + xno) + { echo "$as_me:$LINENO: result: unsupported" >&5 +echo "${ECHO_T}unsupported" >&6; } ;; + *) + CC="$CC $ac_cv_prog_cc_c89" + { echo "$as_me:$LINENO: result: $ac_cv_prog_cc_c89" >&5 +echo "${ECHO_T}$ac_cv_prog_cc_c89" >&6; } ;; +esac + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +{ echo "$as_me:$LINENO: checking for a sed that does not truncate output" >&5 +echo $ECHO_N "checking for a sed that does not truncate output... $ECHO_C" >&6; } +if test "${lt_cv_path_SED+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + # Loop through the user's path and test for sed and gsed. +# Then use that list of sed's as ones to test for truncation. +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for lt_ac_prog in sed gsed; do + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then + lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" + fi + done + done +done +lt_ac_max=0 +lt_ac_count=0 +# Add /usr/xpg4/bin/sed as it is typically found on Solaris +# along with /bin/sed that truncates output. +for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do + test ! -f $lt_ac_sed && continue + cat /dev/null > conftest.in + lt_ac_count=0 + echo $ECHO_N "0123456789$ECHO_C" >conftest.in + # Check for GNU sed and select it if it is found. 
+ if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then + lt_cv_path_SED=$lt_ac_sed + break + fi + while true; do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo >>conftest.nl + $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break + cmp -s conftest.out conftest.nl || break + # 10000 chars as input seems more than enough + test $lt_ac_count -gt 10 && break + lt_ac_count=`expr $lt_ac_count + 1` + if test $lt_ac_count -gt $lt_ac_max; then + lt_ac_max=$lt_ac_count + lt_cv_path_SED=$lt_ac_sed + fi + done +done + +fi + +SED=$lt_cv_path_SED +{ echo "$as_me:$LINENO: result: $SED" >&5 +echo "${ECHO_T}$SED" >&6; } + +{ echo "$as_me:$LINENO: checking for grep that handles long lines and -e" >&5 +echo $ECHO_N "checking for grep that handles long lines and -e... $ECHO_C" >&6; } +if test "${ac_cv_path_GREP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + # Extract the first word of "grep ggrep" to use in msg output +if test -z "$GREP"; then +set dummy grep ggrep; ac_prog_name=$2 +if test "${ac_cv_path_GREP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_path_GREP_found=false +# Loop through the user's path and test for each of PROGNAME-LIST +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in grep ggrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" + { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue + # Check for GNU ac_path_GREP and select it if it is found. + # Check for GNU $ac_path_GREP +case `"$ac_path_GREP" --version 2>&1` in +*GNU*) + ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; +*) + ac_count=0 + echo $ECHO_N "0123456789$ECHO_C" >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + echo 'GREP' >> "conftest.nl" + "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + ac_count=`expr $ac_count + 1` + if test $ac_count -gt ${ac_path_GREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_GREP="$ac_path_GREP" + ac_path_GREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + + $ac_path_GREP_found && break 3 + done +done + +done +IFS=$as_save_IFS + + +fi + +GREP="$ac_cv_path_GREP" +if test -z "$GREP"; then + { { echo "$as_me:$LINENO: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5 +echo "$as_me: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;} + { (exit 1); exit 1; }; } +fi + +else + ac_cv_path_GREP=$GREP +fi + + +fi +{ echo "$as_me:$LINENO: result: $ac_cv_path_GREP" >&5 +echo "${ECHO_T}$ac_cv_path_GREP" >&6; } + GREP="$ac_cv_path_GREP" + + +{ echo "$as_me:$LINENO: checking for egrep" >&5 +echo $ECHO_N "checking for egrep... 
$ECHO_C" >&6; } +if test "${ac_cv_path_EGREP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 + then ac_cv_path_EGREP="$GREP -E" + else + # Extract the first word of "egrep" to use in msg output +if test -z "$EGREP"; then +set dummy egrep; ac_prog_name=$2 +if test "${ac_cv_path_EGREP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_path_EGREP_found=false +# Loop through the user's path and test for each of PROGNAME-LIST +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in egrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" + { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue + # Check for GNU ac_path_EGREP and select it if it is found. + # Check for GNU $ac_path_EGREP +case `"$ac_path_EGREP" --version 2>&1` in +*GNU*) + ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; +*) + ac_count=0 + echo $ECHO_N "0123456789$ECHO_C" >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + echo 'EGREP' >> "conftest.nl" + "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + ac_count=`expr $ac_count + 1` + if test $ac_count -gt ${ac_path_EGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_EGREP="$ac_path_EGREP" + ac_path_EGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + + $ac_path_EGREP_found && break 3 + done +done + +done +IFS=$as_save_IFS + + +fi + +EGREP="$ac_cv_path_EGREP" +if test -z "$EGREP"; then + { { echo "$as_me:$LINENO: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5 +echo "$as_me: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;} + { (exit 1); exit 1; }; } +fi + +else + ac_cv_path_EGREP=$EGREP +fi + + + fi +fi +{ echo "$as_me:$LINENO: result: $ac_cv_path_EGREP" >&5 +echo "${ECHO_T}$ac_cv_path_EGREP" >&6; } + EGREP="$ac_cv_path_EGREP" + + + +# Check whether --with-gnu-ld was given. +if test "${with_gnu_ld+set}" = set; then + withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes +else + with_gnu_ld=no +fi + +ac_prog=ld +if test "$GCC" = yes; then + # Check if gcc -print-prog-name=ld gives a path. + { echo "$as_me:$LINENO: checking for ld used by $CC" >&5 +echo $ECHO_N "checking for ld used by $CC... $ECHO_C" >&6; } + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths. + [\\/]* | ?:[\\/]*) + re_direlt='/[^/][^/]*/\.\./' + # Canonicalize the pathname of ld + ac_prog=`echo $ac_prog| $SED 's%\\\\%/%g'` + while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do + ac_prog=`echo $ac_prog| $SED "s%$re_direlt%/%"` + done + test -z "$LD" && LD="$ac_prog" + ;; + "") + # If it fails, then pretend we aren't using GCC. + ac_prog=ld + ;; + *) + # If it is relative, then search for the first ld in PATH. 
+ with_gnu_ld=unknown
+ ;;
+ esac
+elif test "$with_gnu_ld" = yes; then
+ { echo "$as_me:$LINENO: checking for GNU ld" >&5
+echo $ECHO_N "checking for GNU ld... $ECHO_C" >&6; }
+else
+ { echo "$as_me:$LINENO: checking for non-GNU ld" >&5
+echo $ECHO_N "checking for non-GNU ld... $ECHO_C" >&6; }
+fi
+if test "${lt_cv_path_LD+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -z "$LD"; then
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH; do
+ IFS="$lt_save_ifs"
+ test -z "$ac_dir" && ac_dir=.
+ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+ lt_cv_path_LD="$ac_dir/$ac_prog"
+ # Check to see if the program is GNU ld. I'd rather use --version,
+ # but apparently some variants of GNU ld only accept -v.
+ # Break only if it was the GNU/non-GNU ld that we prefer.
+ case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in
+ *GNU* | *'with BFD'*)
+ test "$with_gnu_ld" != no && break
+ ;;
+ *)
+ test "$with_gnu_ld" != yes && break
+ ;;
+ esac
+ fi
+ done
+ IFS="$lt_save_ifs"
+else
+ lt_cv_path_LD="$LD" # Let the user override the test with a path.
+fi
+fi
+
+LD="$lt_cv_path_LD"
+if test -n "$LD"; then
+ { echo "$as_me:$LINENO: result: $LD" >&5
+echo "${ECHO_T}$LD" >&6; }
+else
+ { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+test -z "$LD" && { { echo "$as_me:$LINENO: error: no acceptable ld found in \$PATH" >&5
+echo "$as_me: error: no acceptable ld found in \$PATH" >&2;}
+ { (exit 1); exit 1; }; }
+{ echo "$as_me:$LINENO: checking if the linker ($LD) is GNU ld" >&5
+echo $ECHO_N "checking if the linker ($LD) is GNU ld... $ECHO_C" >&6; }
+if test "${lt_cv_prog_gnu_ld+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ # I'd rather use --version here, but apparently some GNU lds only accept -v.
+case `$LD -v 2>&1 </dev/null` in
+*GNU* | *'with BFD'*)
+ lt_cv_prog_gnu_ld=yes
+ ;;
+*)
+ lt_cv_prog_gnu_ld=no
+ ;;
+esac
+fi
+{ echo "$as_me:$LINENO: result: $lt_cv_prog_gnu_ld" >&5
+echo "${ECHO_T}$lt_cv_prog_gnu_ld" >&6; }
+with_gnu_ld=$lt_cv_prog_gnu_ld
+
+
+{ echo "$as_me:$LINENO: checking for $LD option to reload object files" >&5
+echo $ECHO_N "checking for $LD option to reload object files... $ECHO_C" >&6; }
+if test "${lt_cv_ld_reload_flag+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ lt_cv_ld_reload_flag='-r'
+fi
+{ echo "$as_me:$LINENO: result: $lt_cv_ld_reload_flag" >&5
+echo "${ECHO_T}$lt_cv_ld_reload_flag" >&6; }
+reload_flag=$lt_cv_ld_reload_flag
+case $reload_flag in
+"" | " "*) ;;
+*) reload_flag=" $reload_flag" ;;
+esac
+reload_cmds='$LD$reload_flag -o $output$reload_objs'
+case $host_os in
+ darwin*)
+ if test "$GCC" = yes; then
+ reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs'
+ else
+ reload_cmds='$LD$reload_flag -o $output$reload_objs'
+ fi
+ ;;
+esac
+
+{ echo "$as_me:$LINENO: checking for BSD-compatible nm" >&5
+echo $ECHO_N "checking for BSD-compatible nm... $ECHO_C" >&6; }
+if test "${lt_cv_path_NM+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$NM"; then
+ # Let the user override the test.
+ lt_cv_path_NM="$NM"
+else
+ lt_nm_to_check="${ac_tool_prefix}nm"
+ if test -n "$ac_tool_prefix" && test "$build" = "$host"; then
+ lt_nm_to_check="$lt_nm_to_check nm"
+ fi
+ for lt_tmp_nm in $lt_nm_to_check; do
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do
+ IFS="$lt_save_ifs"
+ test -z "$ac_dir" && ac_dir=.
+ tmp_nm="$ac_dir/$lt_tmp_nm"
+ if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then
+ # Check to see if the nm accepts a BSD-compat flag.
+ # Adding the `sed 1q' prevents false positives on HP-UX, which says: + # nm: unknown option "B" ignored + # Tru64's nm complains that /dev/null is an invalid object file + case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in + */dev/null* | *'Invalid file or object type'*) + lt_cv_path_NM="$tmp_nm -B" + break + ;; + *) + case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in + */dev/null*) + lt_cv_path_NM="$tmp_nm -p" + break + ;; + *) + lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but + continue # so that we can try to find one that supports BSD flags + ;; + esac + ;; + esac + fi + done + IFS="$lt_save_ifs" + done + test -z "$lt_cv_path_NM" && lt_cv_path_NM=nm +fi +fi +{ echo "$as_me:$LINENO: result: $lt_cv_path_NM" >&5 +echo "${ECHO_T}$lt_cv_path_NM" >&6; } +NM="$lt_cv_path_NM" + +{ echo "$as_me:$LINENO: checking whether ln -s works" >&5 +echo $ECHO_N "checking whether ln -s works... $ECHO_C" >&6; } +LN_S=$as_ln_s +if test "$LN_S" = "ln -s"; then + { echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6; } +else + { echo "$as_me:$LINENO: result: no, using $LN_S" >&5 +echo "${ECHO_T}no, using $LN_S" >&6; } +fi + +{ echo "$as_me:$LINENO: checking how to recognise dependent libraries" >&5 +echo $ECHO_N "checking how to recognise dependent libraries... $ECHO_C" >&6; } +if test "${lt_cv_deplibs_check_method+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_file_magic_cmd='$MAGIC_CMD' +lt_cv_file_magic_test_file= +lt_cv_deplibs_check_method='unknown' +# Need to set the preceding variable on all platforms that support +# interlibrary dependencies. +# 'none' -- dependencies not supported. +# `unknown' -- same as none, but documents that we really don't know. +# 'pass_all' -- all dependencies passed with no checks. +# 'test_compile' -- check by making test program. +# 'file_magic [[regex]]' -- check by looking for files in library path +# which responds to the $file_magic_cmd with a given extended regex. +# If you have `file' or equivalent on your system and you're not sure +# whether `pass_all' will *always* work, you probably want this one. + +case $host_os in +aix4* | aix5*) + lt_cv_deplibs_check_method=pass_all + ;; + +beos*) + lt_cv_deplibs_check_method=pass_all + ;; + +bsdi[45]*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' + lt_cv_file_magic_cmd='/usr/bin/file -L' + lt_cv_file_magic_test_file=/shlib/libc.so + ;; + +cygwin*) + # func_win32_libid is a shell function defined in ltmain.sh + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + ;; + +mingw* | pw32*) + # Base MSYS/MinGW do not provide the 'file' command needed by + # func_win32_libid shell function, so use a weaker test based on 'objdump'. + lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' + lt_cv_file_magic_cmd='$OBJDUMP -f' + ;; + +darwin* | rhapsody*) + lt_cv_deplibs_check_method=pass_all + ;; + +freebsd* | kfreebsd*-gnu | dragonfly*) + if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then + case $host_cpu in + i*86 ) + # Not sure whether the presence of OpenBSD here was a mistake. + # Let's accept both of them until this is cleared up. 
+ lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` + ;; + esac + else + lt_cv_deplibs_check_method=pass_all + fi + ;; + +gnu*) + lt_cv_deplibs_check_method=pass_all + ;; + +hpux10.20* | hpux11*) + lt_cv_file_magic_cmd=/usr/bin/file + case $host_cpu in + ia64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' + lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so + ;; + hppa*64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - PA-RISC [0-9].[0-9]' + lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl + ;; + *) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9].[0-9]) shared library' + lt_cv_file_magic_test_file=/usr/lib/libc.sl + ;; + esac + ;; + +interix3*) + # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$' + ;; + +irix5* | irix6* | nonstopux*) + case $LD in + *-32|*"-32 ") libmagic=32-bit;; + *-n32|*"-n32 ") libmagic=N32;; + *-64|*"-64 ") libmagic=64-bit;; + *) libmagic=never-match;; + esac + lt_cv_deplibs_check_method=pass_all + ;; + +# This must be Linux ELF. +linux*) + lt_cv_deplibs_check_method=pass_all + ;; + +netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' + fi + ;; + +newos6*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=/usr/lib/libnls.so + ;; + +nto-qnx*) + lt_cv_deplibs_check_method=unknown + ;; + +openbsd*) + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + fi + ;; + +osf3* | osf4* | osf5*) + lt_cv_deplibs_check_method=pass_all + ;; + +solaris*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv4 | sysv4.3*) + case $host_vendor in + motorola) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` + ;; + ncr) + lt_cv_deplibs_check_method=pass_all + ;; + sequent) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' + ;; + sni) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" + lt_cv_file_magic_test_file=/lib/libc.so + ;; + siemens) + lt_cv_deplibs_check_method=pass_all + ;; + pc) + lt_cv_deplibs_check_method=pass_all + ;; + esac + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + lt_cv_deplibs_check_method=pass_all + ;; +esac + +fi +{ echo "$as_me:$LINENO: result: $lt_cv_deplibs_check_method" >&5 +echo "${ECHO_T}$lt_cv_deplibs_check_method" >&6; } +file_magic_cmd=$lt_cv_file_magic_cmd +deplibs_check_method=$lt_cv_deplibs_check_method +test -z "$deplibs_check_method" && deplibs_check_method=unknown + + + + +# If no C compiler was specified, use CC. 
+LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + +# Check whether --enable-libtool-lock was given. +if test "${enable_libtool_lock+set}" = set; then + enableval=$enable_libtool_lock; +fi + +test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes + +# Some flags need to be propagated to the compiler or linker for good +# libtool support. +case $host in +ia64-*-hpux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + case `/usr/bin/file conftest.$ac_objext` in + *ELF-32*) + HPUX_IA64_MODE="32" + ;; + *ELF-64*) + HPUX_IA64_MODE="64" + ;; + esac + fi + rm -rf conftest* + ;; +*-*-irix6*) + # Find out which ABI we are using. + echo '#line 3685 "configure"' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + if test "$lt_cv_prog_gnu_ld" = yes; then + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -melf32bsmip" + ;; + *N32*) + LD="${LD-ld} -melf32bmipn32" + ;; + *64-bit*) + LD="${LD-ld} -melf64bmip" + ;; + esac + else + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -32" + ;; + *N32*) + LD="${LD-ld} -n32" + ;; + *64-bit*) + LD="${LD-ld} -64" + ;; + esac + fi + fi + rm -rf conftest* + ;; + +x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*|s390*-*linux*|sparc*-*linux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + case `/usr/bin/file conftest.o` in + *32-bit*) + case $host in + x86_64-*linux*) + LD="${LD-ld} -m elf_i386" + ;; + ppc64-*linux*|powerpc64-*linux*) + LD="${LD-ld} -m elf32ppclinux" + ;; + s390x-*linux*) + LD="${LD-ld} -m elf_s390" + ;; + sparc64-*linux*) + LD="${LD-ld} -m elf32_sparc" + ;; + esac + ;; + *64-bit*) + case $host in + x86_64-*linux*) + LD="${LD-ld} -m elf_x86_64" + ;; + ppc*-*linux*|powerpc*-*linux*) + LD="${LD-ld} -m elf64ppc" + ;; + s390*-*linux*) + LD="${LD-ld} -m elf64_s390" + ;; + sparc*-*linux*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + +*-*-sco3.2v5*) + # On SCO OpenServer 5, we need -belf to get full-featured binaries. + SAVE_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -belf" + { echo "$as_me:$LINENO: checking whether the C compiler needs -belf" >&5 +echo $ECHO_N "checking whether the C compiler needs -belf... $ECHO_C" >&6; } +if test "${lt_cv_cc_needs_belf+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + lt_cv_cc_needs_belf=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + lt_cv_cc_needs_belf=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +fi +{ echo "$as_me:$LINENO: result: $lt_cv_cc_needs_belf" >&5 +echo "${ECHO_T}$lt_cv_cc_needs_belf" >&6; } + if test x"$lt_cv_cc_needs_belf" != x"yes"; then + # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf + CFLAGS="$SAVE_CFLAGS" + fi + ;; +sparc*-*solaris*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + case `/usr/bin/file conftest.o` in + *64-bit*) + case $lt_cv_prog_gnu_ld in + yes*) LD="${LD-ld} -m elf64_sparc" ;; + *) LD="${LD-ld} -64" ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + + +esac + +need_locks="$enable_libtool_lock" + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +{ echo "$as_me:$LINENO: checking how to run the C preprocessor" >&5 +echo $ECHO_N "checking how to run the C preprocessor... $ECHO_C" >&6; } +# On Suns, sometimes $CPP names a directory. +if test -n "$CPP" && test -d "$CPP"; then + CPP= +fi +if test -z "$CPP"; then + if test "${ac_cv_prog_CPP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + # Double quotes because CPP needs to be expanded + for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" + do + ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? 
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then + : +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Broken: fails on valid input. +continue +fi + +rm -f conftest.err conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then + # Broken: success on invalid input. +continue +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Passes both tests. +ac_preproc_ok=: +break +fi + +rm -f conftest.err conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.err conftest.$ac_ext +if $ac_preproc_ok; then + break +fi + + done + ac_cv_prog_CPP=$CPP + +fi + CPP=$ac_cv_prog_CPP +else + ac_cv_prog_CPP=$CPP +fi +{ echo "$as_me:$LINENO: result: $CPP" >&5 +echo "${ECHO_T}$CPP" >&6; } +ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then + : +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Broken: fails on valid input. +continue +fi + +rm -f conftest.err conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/
+#include <ac_nonexistent.h>
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null && {
+ test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ }; then
+ # Broken: success on invalid input.
+continue
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+
+rm -f conftest.err conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then
+ :
+else
+ { { echo "$as_me:$LINENO: error: C preprocessor \"$CPP\" fails sanity check
+See \`config.log' for more details." >&5
+echo "$as_me: error: C preprocessor \"$CPP\" fails sanity check
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+{ echo "$as_me:$LINENO: checking for ANSI C header files" >&5
+echo $ECHO_N "checking for ANSI C header files... $ECHO_C" >&6; }
+if test "${ac_cv_header_stdc+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <float.h>
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_header_stdc=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_header_stdc=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+if test $ac_cv_header_stdc = yes; then
+ # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <string.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "memchr" >/dev/null 2>&1; then
+ :
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdlib.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "free" >/dev/null 2>&1; then
+ :
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
+ if test "$cross_compiling" = yes; then + : +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +#include +#if ((' ' & 0x0FF) == 0x020) +# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') +# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) +#else +# define ISLOWER(c) \ + (('a' <= (c) && (c) <= 'i') \ + || ('j' <= (c) && (c) <= 'r') \ + || ('s' <= (c) && (c) <= 'z')) +# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) +#endif + +#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) +int +main () +{ + int i; + for (i = 0; i < 256; i++) + if (XOR (islower (i), ISLOWER (i)) + || toupper (i) != TOUPPER (i)) + return 2; + return 0; +} +_ACEOF +rm -f conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + { (case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + : +else + echo "$as_me: program exited with status $ac_status" >&5 +echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +( exit $ac_status ) +ac_cv_header_stdc=no +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +fi + + +fi +fi +{ echo "$as_me:$LINENO: result: $ac_cv_header_stdc" >&5 +echo "${ECHO_T}$ac_cv_header_stdc" >&6; } +if test $ac_cv_header_stdc = yes; then + +cat >>confdefs.h <<\_ACEOF +#define STDC_HEADERS 1 +_ACEOF + +fi + +# On IRIX 5.3, sys/types and inttypes.h are conflicting. + + + + + + + + + +for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ + inttypes.h stdint.h unistd.h +do +as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` +{ echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; } +if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + +#include <$ac_header> +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest.$ac_objext; then + eval "$as_ac_Header=yes" +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + eval "$as_ac_Header=no" +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +ac_res=`eval echo '${'$as_ac_Header'}'` + { echo "$as_me:$LINENO: result: $ac_res" >&5 +echo "${ECHO_T}$ac_res" >&6; } +if test `eval echo '${'$as_ac_Header'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + + + +for ac_header in dlfcn.h +do +as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` +if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then + { echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; } +if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +fi +ac_res=`eval echo '${'$as_ac_Header'}'` + { echo "$as_me:$LINENO: result: $ac_res" >&5 +echo "${ECHO_T}$ac_res" >&6; } +else + # Is the header compilable? +{ echo "$as_me:$LINENO: checking $ac_header usability" >&5 +echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6; } +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +#include <$ac_header> +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_header_compiler=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_compiler=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 +echo "${ECHO_T}$ac_header_compiler" >&6; } + +# Is the header present? +{ echo "$as_me:$LINENO: checking $ac_header presence" >&5 +echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6; } +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include <$ac_header> +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then + ac_header_preproc=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_preproc=no +fi + +rm -f conftest.err conftest.$ac_ext +{ echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 +echo "${ECHO_T}$ac_header_preproc" >&6; } + +# So? What about this header? 
+case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in + yes:no: ) + { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 +echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 +echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} + ac_header_preproc=yes + ;; + no:yes:* ) + { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 +echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 +echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 +echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 +echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 +echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 +echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} + + ;; +esac +{ echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; } +if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + eval "$as_ac_Header=\$ac_header_preproc" +fi +ac_res=`eval echo '${'$as_ac_Header'}'` + { echo "$as_me:$LINENO: result: $ac_res" >&5 +echo "${ECHO_T}$ac_res" >&6; } + +fi +if test `eval echo '${'$as_ac_Header'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +if test -z "$CXX"; then + if test -n "$CCC"; then + CXX=$CCC + else + if test -n "$ac_tool_prefix"; then + for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_CXX+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$CXX"; then + ac_cv_prog_CXX="$CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +CXX=$ac_cv_prog_CXX +if test -n "$CXX"; then + { echo "$as_me:$LINENO: result: $CXX" >&5 +echo "${ECHO_T}$CXX" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + + test -n "$CXX" && break + done +fi +if test -z "$CXX"; then + ac_ct_CXX=$CXX + for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_ac_ct_CXX+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_CXX"; then + ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_CXX="$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +ac_ct_CXX=$ac_cv_prog_ac_ct_CXX +if test -n "$ac_ct_CXX"; then + { echo "$as_me:$LINENO: result: $ac_ct_CXX" >&5 +echo "${ECHO_T}$ac_ct_CXX" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + + test -n "$ac_ct_CXX" && break +done + + if test "x$ac_ct_CXX" = x; then + CXX="g++" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&5 +echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&2;} +ac_tool_warned=yes ;; +esac + CXX=$ac_ct_CXX + fi +fi + + fi +fi +# Provide some information about the compiler. +echo "$as_me:$LINENO: checking for C++ compiler version" >&5 +ac_compiler=`set X $ac_compile; echo $2` +{ (ac_try="$ac_compiler --version >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compiler --version >&5") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (ac_try="$ac_compiler -v >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compiler -v >&5") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (ac_try="$ac_compiler -V >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compiler -V >&5") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } + +{ echo "$as_me:$LINENO: checking whether we are using the GNU C++ compiler" >&5 +echo $ECHO_N "checking whether we are using the GNU C++ compiler... $ECHO_C" >&6; } +if test "${ac_cv_cxx_compiler_gnu+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_compiler_gnu=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_compiler_gnu=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_cxx_compiler_gnu=$ac_compiler_gnu + +fi +{ echo "$as_me:$LINENO: result: $ac_cv_cxx_compiler_gnu" >&5 +echo "${ECHO_T}$ac_cv_cxx_compiler_gnu" >&6; } +GXX=`test $ac_compiler_gnu = yes && echo yes` +ac_test_CXXFLAGS=${CXXFLAGS+set} +ac_save_CXXFLAGS=$CXXFLAGS +{ echo "$as_me:$LINENO: checking whether $CXX accepts -g" >&5 +echo $ECHO_N "checking whether $CXX accepts -g... $ECHO_C" >&6; } +if test "${ac_cv_prog_cxx_g+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_save_cxx_werror_flag=$ac_cxx_werror_flag + ac_cxx_werror_flag=yes + ac_cv_prog_cxx_g=no + CXXFLAGS="-g" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_prog_cxx_g=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + CXXFLAGS="" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest.$ac_objext; then + : +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cxx_werror_flag=$ac_save_cxx_werror_flag + CXXFLAGS="-g" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_prog_cxx_g=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_cxx_werror_flag=$ac_save_cxx_werror_flag +fi +{ echo "$as_me:$LINENO: result: $ac_cv_prog_cxx_g" >&5 +echo "${ECHO_T}$ac_cv_prog_cxx_g" >&6; } +if test "$ac_test_CXXFLAGS" = set; then + CXXFLAGS=$ac_save_CXXFLAGS +elif test $ac_cv_prog_cxx_g = yes; then + if test "$GXX" = yes; then + CXXFLAGS="-g -O2" + else + CXXFLAGS="-g" + fi +else + if test "$GXX" = yes; then + CXXFLAGS="-O2" + else + CXXFLAGS= + fi +fi +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + + +if test -n "$CXX" && ( test "X$CXX" != "Xno" && + ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || + (test "X$CXX" != "Xg++"))) ; then + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +{ echo "$as_me:$LINENO: checking how to run the C++ preprocessor" >&5 +echo $ECHO_N "checking how to run the C++ preprocessor... $ECHO_C" >&6; } +if test -z "$CXXCPP"; then + if test "${ac_cv_prog_CXXCPP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + # Double quotes because CXXCPP needs to be expanded + for CXXCPP in "$CXX -E" "/lib/cpp" + do + ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? 
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || + test ! -s conftest.err + }; then + : +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Broken: fails on valid input. +continue +fi + +rm -f conftest.err conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || + test ! -s conftest.err + }; then + # Broken: success on invalid input. +continue +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Passes both tests. +ac_preproc_ok=: +break +fi + +rm -f conftest.err conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.err conftest.$ac_ext +if $ac_preproc_ok; then + break +fi + + done + ac_cv_prog_CXXCPP=$CXXCPP + +fi + CXXCPP=$ac_cv_prog_CXXCPP +else + ac_cv_prog_CXXCPP=$CXXCPP +fi +{ echo "$as_me:$LINENO: result: $CXXCPP" >&5 +echo "${ECHO_T}$CXXCPP" >&6; } +ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || + test ! -s conftest.err + }; then + : +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Broken: fails on valid input. +continue +fi + +rm -f conftest.err conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ +#include +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || + test ! -s conftest.err + }; then + # Broken: success on invalid input. +continue +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Passes both tests. +ac_preproc_ok=: +break +fi + +rm -f conftest.err conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.err conftest.$ac_ext +if $ac_preproc_ok; then + : +else + { { echo "$as_me:$LINENO: error: C++ preprocessor \"$CXXCPP\" fails sanity check +See \`config.log' for more details." >&5 +echo "$as_me: error: C++ preprocessor \"$CXXCPP\" fails sanity check +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +fi + + +ac_ext=f +ac_compile='$F77 -c $FFLAGS conftest.$ac_ext >&5' +ac_link='$F77 -o conftest$ac_exeext $FFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_f77_compiler_gnu +if test -n "$ac_tool_prefix"; then + for ac_prog in g77 xlf f77 frt pgf77 cf77 fort77 fl32 af77 xlf90 f90 pgf90 pghpf epcf90 gfortran g95 xlf95 f95 fort ifort ifc efc pgf95 lf95 ftn + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_F77+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$F77"; then + ac_cv_prog_F77="$F77" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_F77="$ac_tool_prefix$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +F77=$ac_cv_prog_F77 +if test -n "$F77"; then + { echo "$as_me:$LINENO: result: $F77" >&5 +echo "${ECHO_T}$F77" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + + test -n "$F77" && break + done +fi +if test -z "$F77"; then + ac_ct_F77=$F77 + for ac_prog in g77 xlf f77 frt pgf77 cf77 fort77 fl32 af77 xlf90 f90 pgf90 pghpf epcf90 gfortran g95 xlf95 f95 fort ifort ifc efc pgf95 lf95 ftn +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_ac_ct_F77+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_F77"; then + ac_cv_prog_ac_ct_F77="$ac_ct_F77" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_F77="$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +ac_ct_F77=$ac_cv_prog_ac_ct_F77 +if test -n "$ac_ct_F77"; then + { echo "$as_me:$LINENO: result: $ac_ct_F77" >&5 +echo "${ECHO_T}$ac_ct_F77" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + + test -n "$ac_ct_F77" && break +done + + if test "x$ac_ct_F77" = x; then + F77="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&5 +echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&2;} +ac_tool_warned=yes ;; +esac + F77=$ac_ct_F77 + fi +fi + + +# Provide some information about the compiler. +echo "$as_me:$LINENO: checking for Fortran 77 compiler version" >&5 +ac_compiler=`set X $ac_compile; echo $2` +{ (ac_try="$ac_compiler --version >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compiler --version >&5") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (ac_try="$ac_compiler -v >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compiler -v >&5") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (ac_try="$ac_compiler -V >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compiler -V >&5") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +rm -f a.out + +# If we don't use `.F' as extension, the preprocessor is not run on the +# input file. (Note that this only needs to work for GNU compilers.) +ac_save_ext=$ac_ext +ac_ext=F +{ echo "$as_me:$LINENO: checking whether we are using the GNU Fortran 77 compiler" >&5 +echo $ECHO_N "checking whether we are using the GNU Fortran 77 compiler... $ECHO_C" >&6; } +if test "${ac_cv_f77_compiler_gnu+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF + program main +#ifndef __GNUC__ + choke me +#endif + + end +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_f77_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest.$ac_objext; then + ac_compiler_gnu=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_compiler_gnu=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_f77_compiler_gnu=$ac_compiler_gnu + +fi +{ echo "$as_me:$LINENO: result: $ac_cv_f77_compiler_gnu" >&5 +echo "${ECHO_T}$ac_cv_f77_compiler_gnu" >&6; } +ac_ext=$ac_save_ext +ac_test_FFLAGS=${FFLAGS+set} +ac_save_FFLAGS=$FFLAGS +FFLAGS= +{ echo "$as_me:$LINENO: checking whether $F77 accepts -g" >&5 +echo $ECHO_N "checking whether $F77 accepts -g... $ECHO_C" >&6; } +if test "${ac_cv_prog_f77_g+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + FFLAGS=-g +cat >conftest.$ac_ext <<_ACEOF + program main + + end +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_f77_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_prog_f77_g=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_prog_f77_g=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ echo "$as_me:$LINENO: result: $ac_cv_prog_f77_g" >&5 +echo "${ECHO_T}$ac_cv_prog_f77_g" >&6; } +if test "$ac_test_FFLAGS" = set; then + FFLAGS=$ac_save_FFLAGS +elif test $ac_cv_prog_f77_g = yes; then + if test "x$ac_cv_f77_compiler_gnu" = xyes; then + FFLAGS="-g -O2" + else + FFLAGS="-g" + fi +else + if test "x$ac_cv_f77_compiler_gnu" = xyes; then + FFLAGS="-O2" + else + FFLAGS= + fi +fi + +G77=`test $ac_compiler_gnu = yes && echo yes` +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + +# Autoconf 2.13's AC_OBJEXT and AC_EXEEXT macros only works for C compilers! + +# find the maximum length of command line arguments +{ echo "$as_me:$LINENO: checking the maximum length of command line arguments" >&5 +echo $ECHO_N "checking the maximum length of command line arguments... $ECHO_C" >&6; } +if test "${lt_cv_sys_max_cmd_len+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + i=0 + teststring="ABCD" + + case $build_os in + msdosdjgpp*) + # On DJGPP, this test can blow up pretty badly due to problems in libc + # (any single argument exceeding 2000 bytes causes a buffer overrun + # during glob expansion). Even if it were fixed, the result of this + # check would be larger than it should be. + lt_cv_sys_max_cmd_len=12288; # 12K is about right + ;; + + gnu*) + # Under GNU Hurd, this test is not required because there is + # no limit to the length of command line arguments. + # Libtool will interpret -1 as no limit whatsoever + lt_cv_sys_max_cmd_len=-1; + ;; + + cygwin* | mingw*) + # On Win9x/ME, this test blows up -- it succeeds, but takes + # about 5 minutes as the teststring grows exponentially. + # Worse, since 9x/ME are not pre-emptively multitasking, + # you end up with a "frozen" computer, even though with patience + # the test eventually succeeds (with a max line length of 256k). 
+ # Instead, let's just punt: use the minimum linelength reported by + # all of the supported platforms: 8192 (on NT/2K/XP). + lt_cv_sys_max_cmd_len=8192; + ;; + + amigaos*) + # On AmigaOS with pdksh, this test takes hours, literally. + # So we just punt and use a minimum line length of 8192. + lt_cv_sys_max_cmd_len=8192; + ;; + + netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) + # This has been around since 386BSD, at least. Likely further. + if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` + elif test -x /usr/sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` + else + lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs + fi + # And add a safety zone + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + ;; + + interix*) + # We know the value 262144 and hardcode it with a safety zone (like BSD) + lt_cv_sys_max_cmd_len=196608 + ;; + + osf*) + # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure + # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not + # nice to cause kernel panics so lets avoid the loop below. + # First set a reasonable default. + lt_cv_sys_max_cmd_len=16384 + # + if test -x /sbin/sysconfig; then + case `/sbin/sysconfig -q proc exec_disable_arg_limit` in + *1*) lt_cv_sys_max_cmd_len=-1 ;; + esac + fi + ;; + sco3.2v5*) + lt_cv_sys_max_cmd_len=102400 + ;; + sysv5* | sco5v6* | sysv4.2uw2*) + kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` + if test -n "$kargmax"; then + lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'` + else + lt_cv_sys_max_cmd_len=32768 + fi + ;; + *) + # If test is not a shell built-in, we'll probably end up computing a + # maximum length that is only half of the actual maximum length, but + # we can't tell. + SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} + while (test "X"`$SHELL $0 --fallback-echo "X$teststring" 2>/dev/null` \ + = "XX$teststring") >/dev/null 2>&1 && + new_result=`expr "X$teststring" : ".*" 2>&1` && + lt_cv_sys_max_cmd_len=$new_result && + test $i != 17 # 1/2 MB should be enough + do + i=`expr $i + 1` + teststring=$teststring$teststring + done + teststring= + # Add a significant safety factor because C++ compilers can tack on massive + # amounts of additional arguments before passing them to the linker. + # It appears as though 1/2 is a usable value. + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` + ;; + esac + +fi + +if test -n $lt_cv_sys_max_cmd_len ; then + { echo "$as_me:$LINENO: result: $lt_cv_sys_max_cmd_len" >&5 +echo "${ECHO_T}$lt_cv_sys_max_cmd_len" >&6; } +else + { echo "$as_me:$LINENO: result: none" >&5 +echo "${ECHO_T}none" >&6; } +fi + + + + +# Check for command to grab the raw symbol name followed by C symbol from nm. +{ echo "$as_me:$LINENO: checking command to parse $NM output from $compiler object" >&5 +echo $ECHO_N "checking command to parse $NM output from $compiler object... $ECHO_C" >&6; } +if test "${lt_cv_sys_global_symbol_pipe+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + +# These are sane defaults that work on at least a few old systems. +# [They come from Ultrix. What could be older than Ultrix?!! ;)] + +# Character class describing NM global symbol codes. +symcode='[BCDEGRST]' + +# Regexp to match symbols that can be accessed directly from C. +sympat='\([_A-Za-z][_A-Za-z0-9]*\)' + +# Transform an extracted symbol line into a proper C declaration +lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^. 
.* \(.*\)$/extern int \1;/p'" + +# Transform an extracted symbol line into symbol name and symbol address +lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode \([^ ]*\) \([^ ]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" + +# Define system-specific variables. +case $host_os in +aix*) + symcode='[BCDT]' + ;; +cygwin* | mingw* | pw32*) + symcode='[ABCDGISTW]' + ;; +hpux*) # Its linker distinguishes data from code symbols + if test "$host_cpu" = ia64; then + symcode='[ABCDEGRST]' + fi + lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" + ;; +linux*) + if test "$host_cpu" = ia64; then + symcode='[ABCDGIRSTW]' + lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" + fi + ;; +irix* | nonstopux*) + symcode='[BCDEGRST]' + ;; +osf*) + symcode='[BCDEGQRST]' + ;; +solaris*) + symcode='[BDRT]' + ;; +sco3.2v5*) + symcode='[DT]' + ;; +sysv4.2uw2*) + symcode='[DT]' + ;; +sysv5* | sco5v6* | unixware* | OpenUNIX*) + symcode='[ABDT]' + ;; +sysv4) + symcode='[DFNSTU]' + ;; +esac + +# Handle CRLF in mingw tool chain +opt_cr= +case $build_os in +mingw*) + opt_cr=`echo 'x\{0,1\}' | tr x '\015'` # option cr in regexp + ;; +esac + +# If we're using GNU nm, then use its standard symbol codes. +case `$NM -V 2>&1` in +*GNU* | *'with BFD'*) + symcode='[ABCDGIRSTW]' ;; +esac + +# Try without a prefix undercore, then with it. +for ac_symprfx in "" "_"; do + + # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. + symxfrm="\\1 $ac_symprfx\\2 \\2" + + # Write the raw and C identifiers. + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + + # Check to see that the pipe works correctly. + pipe_works=no + + rm -f conftest* + cat > conftest.$ac_ext <&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + # Now try to grab the symbols. + nlist=conftest.nm + if { (eval echo "$as_me:$LINENO: \"$NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist\"") >&5 + (eval $NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && test -s "$nlist"; then + # Try sorting and uniquifying the output. + if sort "$nlist" | uniq > "$nlist"T; then + mv -f "$nlist"T "$nlist" + else + rm -f "$nlist"T + fi + + # Make sure that we snagged all the symbols we need. + if grep ' nm_test_var$' "$nlist" >/dev/null; then + if grep ' nm_test_func$' "$nlist" >/dev/null; then + cat < conftest.$ac_ext +#ifdef __cplusplus +extern "C" { +#endif + +EOF + # Now generate the symbol file. + eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | grep -v main >> conftest.$ac_ext' + + cat <> conftest.$ac_ext +#if defined (__STDC__) && __STDC__ +# define lt_ptr_t void * +#else +# define lt_ptr_t char * +# define const +#endif + +/* The mapping between symbol names and symbols. 
*/ +const struct { + const char *name; + lt_ptr_t address; +} +lt_preloaded_symbols[] = +{ +EOF + $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (lt_ptr_t) \&\2},/" < "$nlist" | grep -v main >> conftest.$ac_ext + cat <<\EOF >> conftest.$ac_ext + {0, (lt_ptr_t) 0} +}; + +#ifdef __cplusplus +} +#endif +EOF + # Now try linking the two files. + mv conftest.$ac_objext conftstm.$ac_objext + lt_save_LIBS="$LIBS" + lt_save_CFLAGS="$CFLAGS" + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" + if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && test -s conftest${ac_exeext}; then + pipe_works=yes + fi + LIBS="$lt_save_LIBS" + CFLAGS="$lt_save_CFLAGS" + else + echo "cannot find nm_test_func in $nlist" >&5 + fi + else + echo "cannot find nm_test_var in $nlist" >&5 + fi + else + echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5 + fi + else + echo "$progname: failed program was:" >&5 + cat conftest.$ac_ext >&5 + fi + rm -f conftest* conftst* + + # Do not use the global_symbol_pipe unless it works. + if test "$pipe_works" = yes; then + break + else + lt_cv_sys_global_symbol_pipe= + fi +done + +fi + +if test -z "$lt_cv_sys_global_symbol_pipe"; then + lt_cv_sys_global_symbol_to_cdecl= +fi +if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then + { echo "$as_me:$LINENO: result: failed" >&5 +echo "${ECHO_T}failed" >&6; } +else + { echo "$as_me:$LINENO: result: ok" >&5 +echo "${ECHO_T}ok" >&6; } +fi + +{ echo "$as_me:$LINENO: checking for objdir" >&5 +echo $ECHO_N "checking for objdir... $ECHO_C" >&6; } +if test "${lt_cv_objdir+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + rm -f .libs 2>/dev/null +mkdir .libs 2>/dev/null +if test -d .libs; then + lt_cv_objdir=.libs +else + # MS-DOS does not allow filenames that begin with a dot. + lt_cv_objdir=_libs +fi +rmdir .libs 2>/dev/null +fi +{ echo "$as_me:$LINENO: result: $lt_cv_objdir" >&5 +echo "${ECHO_T}$lt_cv_objdir" >&6; } +objdir=$lt_cv_objdir + + + + + +case $host_os in +aix3*) + # AIX sometimes has problems with the GCC collect2 program. For some + # reason, if we set the COLLECT_NAMES environment variable, the problems + # vanish in a puff of smoke. + if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES + fi + ;; +esac + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +Xsed='sed -e 1s/^X//' +sed_quote_subst='s/\([\\"\\`$\\\\]\)/\\\1/g' + +# Same as above, but do not quote variable references. +double_quote_subst='s/\([\\"\\`\\\\]\)/\\\1/g' + +# Sed substitution to delay expansion of an escaped shell variable in a +# double_quote_subst'ed string. +delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' + +# Sed substitution to avoid accidental globbing in evaled expressions +no_glob_subst='s/\*/\\\*/g' + +# Constants: +rm="rm -f" + +# Global variables: +default_ofile=libtool +can_build_shared=yes + +# All known linkers require a `.a' archive for static linking (except MSVC, +# which needs '.lib'). +libext=a +ltmain="$ac_aux_dir/ltmain.sh" +ofile="$default_ofile" +with_gnu_ld="$lt_cv_prog_gnu_ld" + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. +set dummy ${ac_tool_prefix}ar; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... 
$ECHO_C" >&6; } +if test "${ac_cv_prog_AR+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$AR"; then + ac_cv_prog_AR="$AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_AR="${ac_tool_prefix}ar" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +AR=$ac_cv_prog_AR +if test -n "$AR"; then + { echo "$as_me:$LINENO: result: $AR" >&5 +echo "${ECHO_T}$AR" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_AR"; then + ac_ct_AR=$AR + # Extract the first word of "ar", so it can be a program name with args. +set dummy ar; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_ac_ct_AR+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_AR"; then + ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_AR="ar" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +ac_ct_AR=$ac_cv_prog_ac_ct_AR +if test -n "$ac_ct_AR"; then + { echo "$as_me:$LINENO: result: $ac_ct_AR" >&5 +echo "${ECHO_T}$ac_ct_AR" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + if test "x$ac_ct_AR" = x; then + AR="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&5 +echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&2;} +ac_tool_warned=yes ;; +esac + AR=$ac_ct_AR + fi +else + AR="$ac_cv_prog_AR" +fi + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. +set dummy ${ac_tool_prefix}ranlib; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_RANLIB+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$RANLIB"; then + ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +RANLIB=$ac_cv_prog_RANLIB +if test -n "$RANLIB"; then + { echo "$as_me:$LINENO: result: $RANLIB" >&5 +echo "${ECHO_T}$RANLIB" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_RANLIB"; then + ac_ct_RANLIB=$RANLIB + # Extract the first word of "ranlib", so it can be a program name with args. +set dummy ranlib; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_ac_ct_RANLIB+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_RANLIB"; then + ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_RANLIB="ranlib" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB +if test -n "$ac_ct_RANLIB"; then + { echo "$as_me:$LINENO: result: $ac_ct_RANLIB" >&5 +echo "${ECHO_T}$ac_ct_RANLIB" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + if test "x$ac_ct_RANLIB" = x; then + RANLIB=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&5 +echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&2;} +ac_tool_warned=yes ;; +esac + RANLIB=$ac_ct_RANLIB + fi +else + RANLIB="$ac_cv_prog_RANLIB" +fi + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. +set dummy ${ac_tool_prefix}strip; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_STRIP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$STRIP"; then + ac_cv_prog_STRIP="$STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_STRIP="${ac_tool_prefix}strip" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +STRIP=$ac_cv_prog_STRIP +if test -n "$STRIP"; then + { echo "$as_me:$LINENO: result: $STRIP" >&5 +echo "${ECHO_T}$STRIP" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_STRIP"; then + ac_ct_STRIP=$STRIP + # Extract the first word of "strip", so it can be a program name with args. +set dummy strip; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_ac_ct_STRIP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_STRIP"; then + ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_STRIP="strip" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP +if test -n "$ac_ct_STRIP"; then + { echo "$as_me:$LINENO: result: $ac_ct_STRIP" >&5 +echo "${ECHO_T}$ac_ct_STRIP" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + if test "x$ac_ct_STRIP" = x; then + STRIP=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&5 +echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&2;} +ac_tool_warned=yes ;; +esac + STRIP=$ac_ct_STRIP + fi +else + STRIP="$ac_cv_prog_STRIP" +fi + + +old_CC="$CC" +old_CFLAGS="$CFLAGS" + +# Set sane defaults for various variables +test -z "$AR" && AR=ar +test -z "$AR_FLAGS" && AR_FLAGS=cru +test -z "$AS" && AS=as +test -z "$CC" && CC=cc +test -z "$LTCC" && LTCC=$CC +test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS +test -z "$DLLTOOL" && DLLTOOL=dlltool +test -z "$LD" && LD=ld +test -z "$LN_S" && LN_S="ln -s" +test -z "$MAGIC_CMD" && MAGIC_CMD=file +test -z "$NM" && NM=nm +test -z "$SED" && SED=sed +test -z "$OBJDUMP" && OBJDUMP=objdump +test -z "$RANLIB" && RANLIB=: +test -z "$STRIP" && STRIP=: +test -z "$ac_objext" && ac_objext=o + +# Determine commands to create old-style static archives. 
+old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs$old_deplibs' +old_postinstall_cmds='chmod 644 $oldlib' +old_postuninstall_cmds= + +if test -n "$RANLIB"; then + case $host_os in + openbsd*) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$oldlib" + ;; + *) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$oldlib" + ;; + esac + old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib" +fi + +for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + + +# Only perform the check for file, if the check method requires it +case $deplibs_check_method in +file_magic*) + if test "$file_magic_cmd" = '$MAGIC_CMD'; then + { echo "$as_me:$LINENO: checking for ${ac_tool_prefix}file" >&5 +echo $ECHO_N "checking for ${ac_tool_prefix}file... $ECHO_C" >&6; } +if test "${lt_cv_path_MAGIC_CMD+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f $ac_dir/${ac_tool_prefix}file; then + lt_cv_path_MAGIC_CMD="$ac_dir/${ac_tool_prefix}file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +EOF + fi ;; + esac + fi + break + fi + done + IFS="$lt_save_ifs" + MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; +esac +fi + +MAGIC_CMD="$lt_cv_path_MAGIC_CMD" +if test -n "$MAGIC_CMD"; then + { echo "$as_me:$LINENO: result: $MAGIC_CMD" >&5 +echo "${ECHO_T}$MAGIC_CMD" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + +if test -z "$lt_cv_path_MAGIC_CMD"; then + if test -n "$ac_tool_prefix"; then + { echo "$as_me:$LINENO: checking for file" >&5 +echo $ECHO_N "checking for file... $ECHO_C" >&6; } +if test "${lt_cv_path_MAGIC_CMD+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. 
+ if test -f $ac_dir/file; then + lt_cv_path_MAGIC_CMD="$ac_dir/file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +EOF + fi ;; + esac + fi + break + fi + done + IFS="$lt_save_ifs" + MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; +esac +fi + +MAGIC_CMD="$lt_cv_path_MAGIC_CMD" +if test -n "$MAGIC_CMD"; then + { echo "$as_me:$LINENO: result: $MAGIC_CMD" >&5 +echo "${ECHO_T}$MAGIC_CMD" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + else + MAGIC_CMD=: + fi +fi + + fi + ;; +esac + +enable_dlopen=no +enable_win32_dll=no + +# Check whether --enable-libtool-lock was given. +if test "${enable_libtool_lock+set}" = set; then + enableval=$enable_libtool_lock; +fi + +test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes + + +# Check whether --with-pic was given. +if test "${with_pic+set}" = set; then + withval=$with_pic; pic_mode="$withval" +else + pic_mode=default +fi + +test -z "$pic_mode" && pic_mode=default + +# Check if we have a version mismatch between libtool.m4 and ltmain.sh. +# +# Note: This should be in AC_LIBTOOL_SETUP, _after_ $ltmain have been defined. +# We also should do it _before_ AC_LIBTOOL_LANG_C_CONFIG that actually +# calls AC_LIBTOOL_CONFIG and creates libtool. +# +{ echo "$as_me:$LINENO: checking for correct ltmain.sh version" >&5 +echo $ECHO_N "checking for correct ltmain.sh version... $ECHO_C" >&6; } +if test "x$ltmain" = "x" ; then + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } + { { echo "$as_me:$LINENO: error: + +*** [Gentoo] sanity check failed! *** +*** \$ltmain is not defined, please check the patch for consistency! *** +" >&5 +echo "$as_me: error: + +*** [Gentoo] sanity check failed! *** +*** \$ltmain is not defined, please check the patch for consistency! *** +" >&2;} + { (exit 1); exit 1; }; } +fi +gentoo_lt_version="1.5.22" +gentoo_ltmain_version=`sed -n '/^[ ]*VERSION=/{s/^[ ]*VERSION=//;p;q;}' "$ltmain"` +if test "x$gentoo_lt_version" != "x$gentoo_ltmain_version" ; then + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } + { { echo "$as_me:$LINENO: error: + +*** [Gentoo] sanity check failed! *** +*** libtool.m4 and ltmain.sh have a version mismatch! *** +*** (libtool.m4 = $gentoo_lt_version, ltmain.sh = $gentoo_ltmain_version) *** + +Please run: + + libtoolize --copy --force + +if appropriate, please contact the maintainer of this +package (or your distribution) for help. +" >&5 +echo "$as_me: error: + +*** [Gentoo] sanity check failed! *** +*** libtool.m4 and ltmain.sh have a version mismatch! 
*** +*** (libtool.m4 = $gentoo_lt_version, ltmain.sh = $gentoo_ltmain_version) *** + +Please run: + + libtoolize --copy --force + +if appropriate, please contact the maintainer of this +package (or your distribution) for help. +" >&2;} + { (exit 1); exit 1; }; } +else + { echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6; } +fi + + +# Use C for the default configuration in the libtool script +tagname= +lt_save_CC="$CC" +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +# Source file extension for C test sources. +ac_ext=c + +# Object file extension for compiled C test sources. +objext=o +objext=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;\n" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(){return(0);}\n' + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +printf "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$rm conftest* + +ac_outfile=conftest.$ac_objext +printf "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$rm conftest* + + + +lt_prog_compiler_no_builtin_flag= + +if test "$GCC" = yes; then + lt_prog_compiler_no_builtin_flag=' -fno-builtin' + + +{ echo "$as_me:$LINENO: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 +echo $ECHO_N "checking if $compiler supports -fno-rtti -fno-exceptions... $ECHO_C" >&6; } +if test "${lt_cv_prog_compiler_rtti_exceptions+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_prog_compiler_rtti_exceptions=no + ac_outfile=conftest.$ac_objext + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="-fno-rtti -fno-exceptions" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:6364: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:6368: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_rtti_exceptions=yes + fi + fi + $rm conftest* + +fi +{ echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_rtti_exceptions" >&6; } + +if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then + lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" +else + : +fi + +fi + +lt_prog_compiler_wl= +lt_prog_compiler_pic= +lt_prog_compiler_static= + +{ echo "$as_me:$LINENO: checking for $compiler option to produce PIC" >&5 +echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6; } + + if test "$GCC" = yes; then + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_static='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + fi + ;; + + amigaos*) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' + ;; + + beos* | cygwin* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic='-DDLL_EXPORT' + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic='-fno-common' + ;; + + interix3*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + lt_prog_compiler_can_build_shared=no + enable_shared=no + ;; + + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic=-Kconform_pic + fi + ;; + + hpux*) + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac + ;; + + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + lt_prog_compiler_wl='-Wl,' + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + else + lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' + fi + ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case $cc_basename in + xlc*) + lt_prog_compiler_pic='-qnocommon' + lt_prog_compiler_wl='-Wl,' + ;; + esac + ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic='-DDLL_EXPORT' + ;; + + hpux9* | hpux10* | hpux11*) + lt_prog_compiler_wl='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? 
+ lt_prog_compiler_static='${wl}-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + lt_prog_compiler_wl='-Wl,' + # PIC (with -KPIC) is the default. + lt_prog_compiler_static='-non_shared' + ;; + + newsos6) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + linux*) + case $cc_basename in + icc* | ecc*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-static' + ;; + pgcc* | pgf77* | pgf90* | pgf95*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fpic' + lt_prog_compiler_static='-Bstatic' + ;; + ccc*) + lt_prog_compiler_wl='-Wl,' + # All Alpha code is PIC. + lt_prog_compiler_static='-non_shared' + ;; + esac + ;; + + osf3* | osf4* | osf5*) + lt_prog_compiler_wl='-Wl,' + # All OSF/1 code is PIC. + lt_prog_compiler_static='-non_shared' + ;; + + solaris*) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + case $cc_basename in + f77* | f90* | f95*) + lt_prog_compiler_wl='-Qoption ld ';; + *) + lt_prog_compiler_wl='-Wl,';; + esac + ;; + + sunos4*) + lt_prog_compiler_wl='-Qoption ld ' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec ;then + lt_prog_compiler_pic='-Kconform_pic' + lt_prog_compiler_static='-Bstatic' + fi + ;; + + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + unicos*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_can_build_shared=no + ;; + + uts4*) + lt_prog_compiler_pic='-pic' + lt_prog_compiler_static='-Bstatic' + ;; + + *) + lt_prog_compiler_can_build_shared=no + ;; + esac + fi + +{ echo "$as_me:$LINENO: result: $lt_prog_compiler_pic" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic" >&6; } + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$lt_prog_compiler_pic"; then + +{ echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 +echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic works... $ECHO_C" >&6; } +if test "${lt_prog_compiler_pic_works+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_prog_compiler_pic_works=no + ac_outfile=conftest.$ac_objext + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic -DPIC" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:6632: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:6636: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. 
+ $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_prog_compiler_pic_works=yes + fi + fi + $rm conftest* + +fi +{ echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_works" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic_works" >&6; } + +if test x"$lt_prog_compiler_pic_works" = xyes; then + case $lt_prog_compiler_pic in + "" | " "*) ;; + *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; + esac +else + lt_prog_compiler_pic= + lt_prog_compiler_can_build_shared=no +fi + +fi +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic= + ;; + *) + lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" + ;; +esac + +# +# Check to make sure the static flag actually works. +# +wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" +{ echo "$as_me:$LINENO: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +echo $ECHO_N "checking if $compiler static flag $lt_tmp_static_flag works... $ECHO_C" >&6; } +if test "${lt_prog_compiler_static_works+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_prog_compiler_static_works=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + printf "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&5 + $echo "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_prog_compiler_static_works=yes + fi + else + lt_prog_compiler_static_works=yes + fi + fi + $rm conftest* + LDFLAGS="$save_LDFLAGS" + +fi +{ echo "$as_me:$LINENO: result: $lt_prog_compiler_static_works" >&5 +echo "${ECHO_T}$lt_prog_compiler_static_works" >&6; } + +if test x"$lt_prog_compiler_static_works" = xyes; then + : +else + lt_prog_compiler_static= +fi + + +{ echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 +echo $ECHO_N "checking if $compiler supports -c -o file.$ac_objext... $ECHO_C" >&6; } +if test "${lt_cv_prog_compiler_c_o+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_prog_compiler_c_o=no + $rm -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:6736: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:6740: \$? 
= $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o=yes + fi + fi + chmod u+w . 2>&5 + $rm conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files + $rm out/* && rmdir out + cd .. + rmdir conftest + $rm conftest* + +fi +{ echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_c_o" >&6; } + + +hard_links="nottested" +if test "$lt_cv_prog_compiler_c_o" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + { echo "$as_me:$LINENO: checking if we can lock with hard links" >&5 +echo $ECHO_N "checking if we can lock with hard links... $ECHO_C" >&6; } + hard_links=yes + $rm conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + { echo "$as_me:$LINENO: result: $hard_links" >&5 +echo "${ECHO_T}$hard_links" >&6; } + if test "$hard_links" = no; then + { echo "$as_me:$LINENO: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 +echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + +{ echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared libraries... $ECHO_C" >&6; } + + runpath_var= + allow_undefined_flag= + enable_shared_with_static_runtimes=no + archive_cmds= + archive_expsym_cmds= + old_archive_From_new_cmds= + old_archive_from_expsyms_cmds= + export_dynamic_flag_spec= + whole_archive_flag_spec= + thread_safe_flag_spec= + hardcode_libdir_flag_spec= + hardcode_libdir_flag_spec_ld= + hardcode_libdir_separator= + hardcode_direct=no + hardcode_minus_L=no + hardcode_shlibpath_var=unsupported + link_all_deplibs=unknown + hardcode_automatic=no + module_cmds= + module_expsym_cmds= + always_export_symbols=no + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + include_expsyms= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ` (' and `)$', so one must not match beginning or + # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', + # as well as any symbol that contains `d'. + exclude_expsyms="_GLOBAL_OFFSET_TABLE_" + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + extract_expsyms_cmds= + # Just being paranoid about ensuring that cc_basename is set. 
+ for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + + case $host_os in + cygwin* | mingw* | pw32*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; + openbsd*) + with_gnu_ld=no + ;; + esac + + ld_shlibs=yes + if test "$with_gnu_ld" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='${wl}' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec='${wl}--rpath ${wl}$libdir' + export_dynamic_flag_spec='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. + if $LD --help 2>&1 | grep 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + whole_archive_flag_spec= + fi + supports_anon_versioning=no + case `$LD -v 2>/dev/null` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix3* | aix4* | aix5*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + ld_shlibs=no + cat <&2 + +*** Warning: the GNU linker, at least up to release 2.9.1, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to modify your PATH +*** so that a non-GNU linker is found, and then restart. + +EOF + fi + ;; + + amigaos*) + archive_cmds='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + + # Samuel A. Falvo II reports + # that the semantics of dynamic libraries on AmigaOS, at least up + # to version 4, is to share data among multiple programs linked + # with the same dynamic library. Since this doesn't match the + # behavior of shared libraries on other platforms, we can't use + # them. + ld_shlibs=no + ;; + + beos*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. 
FIXME + archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + ld_shlibs=no + fi + ;; + + cygwin* | mingw* | pw32*) + # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, + # as there is no search path for DLLs. + hardcode_libdir_flag_spec='-L$libdir' + allow_undefined_flag=unsupported + always_export_symbols=no + enable_shared_with_static_runtimes=yes + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS] /s/.* \([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW] /s/.* //'\'' | sort | uniq > $export_symbols' + + if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... + archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs=no + fi + ;; + + interix3*) + hardcode_direct=no + hardcode_shlibpath_var=no + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + export_dynamic_flag_spec='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + archive_expsym_cmds='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + + linux*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + tmp_addflag= + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers + whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + esac + archive_cmds='$CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + + if test $supports_anon_versioning = yes; then + archive_expsym_cmds='$echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + $echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + else + ld_shlibs=no + fi + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris*) + if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then + ld_shlibs=no + cat <&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +EOF + elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not +*** reliably create shared libraries on SCO systems. 
Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + ;; + *) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + hardcode_libdir_flag_spec='`test -z "$SCOABSPATH" && echo ${wl}-rpath,$libdir`' + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname,-retain-symbols-file,$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac + ;; + + sunos4*) + archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + *) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac + + if test "$ld_shlibs" = no; then + runpath_var= + hardcode_libdir_flag_spec= + export_dynamic_flag_spec= + whole_archive_flag_spec= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + allow_undefined_flag=unsupported + always_export_symbols=yes + archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + hardcode_minus_L=yes + if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + hardcode_direct=unsupported + fi + ;; + + aix4* | aix5*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + if $NM -V 2>&1 | grep 'GNU' > /dev/null; then + export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[23]|aix4.[23].*|aix5*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. 
If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + archive_cmds='' + hardcode_direct=yes + hardcode_libdir_separator=':' + link_all_deplibs=yes + + if test "$GCC" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && \ + strings "$collect2name" | grep resolve_lib_name >/dev/null + then + # We have reworked collect2 + hardcode_direct=yes + else + # We have old collect2 + hardcode_direct=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L=yes + hardcode_libdir_flag_spec='-L$libdir' + hardcode_libdir_separator= + fi + ;; + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + always_export_symbols=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag='-berok' + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. 
+if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds="\$CC"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' + allow_undefined_flag="-z nodefs" + archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. +if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag=' ${wl}-bernotok' + allow_undefined_flag=' ${wl}-berok' + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec='$convenience' + archive_cmds_need_lc=yes + # This is similar to how AIX traditionally builds its shared libraries. 
+ archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + amigaos*) + archive_cmds='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + # see comment about different semantics on the GNU ld section + ld_shlibs=no + ;; + + bsdi[45]*) + export_dynamic_flag_spec=-rdynamic + ;; + + cygwin* | mingw* | pw32*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + hardcode_libdir_flag_spec=' ' + allow_undefined_flag=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + archive_cmds='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | $SED -e '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + old_archive_From_new_cmds='true' + # FIXME: Should let the user specify the lib program. + old_archive_cmds='lib /OUT:$oldlib$oldobjs$old_deplibs' + fix_srcfile_path='`cygpath -w "$srcfile"`' + enable_shared_with_static_runtimes=yes + ;; + + darwin* | rhapsody*) + case $host_os in + rhapsody* | darwin1.[012]) + allow_undefined_flag='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + allow_undefined_flag='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[012]) + allow_undefined_flag='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + allow_undefined_flag='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; + esac + archive_cmds_need_lc=no + hardcode_direct=no + hardcode_automatic=yes + hardcode_shlibpath_var=unsupported + whole_archive_flag_spec='' + link_all_deplibs=yes + if test "$GCC" = yes ; then + output_verbose_link_cmd='echo' + archive_cmds='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + module_cmds='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + archive_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + case $cc_basename in + xlc*) + output_verbose_link_cmd='echo' + archive_cmds='$CC -qmkshrobj $allow_undefined_flag -o $lib 
$libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + module_cmds='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + archive_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + ld_shlibs=no + ;; + esac + fi + ;; + + dgux*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; + + freebsd1*) + ld_shlibs=no + ;; + + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). + freebsd2.2*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | kfreebsd*-gnu | dragonfly*) + archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + hpux9*) + if test "$GCC" = yes; then + archive_cmds='$rm $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + archive_cmds='$rm $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + export_dynamic_flag_spec='${wl}-E' + ;; + + hpux10*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + + hardcode_direct=yes + export_dynamic_flag_spec='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ hardcode_minus_L=yes + fi + ;; + + hpux11*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + case $host_cpu in + hppa*64*) + archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds='$CC -shared ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + archive_cmds='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + fi + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + + case $host_cpu in + hppa*64*|ia64*) + hardcode_libdir_flag_spec_ld='+b $libdir' + hardcode_direct=no + hardcode_shlibpath_var=no + ;; + *) + hardcode_direct=yes + export_dynamic_flag_spec='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec_ld='-rpath $libdir' + fi + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + link_all_deplibs=yes + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + newsos6) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_shlibpath_var=no + ;; + + openbsd*) + hardcode_direct=yes + hardcode_shlibpath_var=no + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + export_dynamic_flag_spec='${wl}-E' + else + case $host_os in + openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-R$libdir' + ;; + *) + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + ;; + esac + fi + ;; + + os2*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + allow_undefined_flag=unsupported + archive_cmds='$echo "LIBRARY $libname 
INITINSTANCE" > $output_objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$echo DATA >> $output_objdir/$libname.def~$echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~$echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' + old_archive_From_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; + + osf3*) + if test "$GCC" = yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + fi + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; echo "-hidden">> $lib.exp~ + $LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~$rm $lib.exp' + + # Both c and cxx compiler support -rpath directly + hardcode_libdir_flag_spec='-rpath $libdir' + fi + hardcode_libdir_separator=: + ;; + + solaris*) + no_undefined_flag=' -z text' + if test "$GCC" = yes; then + wlarc='${wl}' + archive_cmds='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -shared ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$rm $lib.exp' + else + wlarc='' + archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' + fi + hardcode_libdir_flag_spec='-R$libdir' + hardcode_shlibpath_var=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine linker options so we + # cannot just pass the convience library names through + # without $wl, iff we do not link with $LD. 
+ # Luckily, gcc supports the same syntax we need for Sun Studio. + # Supported since Solaris 2.6 (maybe 2.5.1?) + case $wlarc in + '') + whole_archive_flag_spec='-z allextract$convenience -z defaultextract' ;; + *) + whole_archive_flag_spec='${wl}-z ${wl}allextract`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}-z ${wl}defaultextract' ;; + esac ;; + esac + link_all_deplibs=yes + ;; + + sunos4*) + if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. + archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + hardcode_libdir_flag_spec='-L$libdir' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; + + sysv4) + case $host_vendor in + sni) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. + archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' + reload_cmds='$CC -r -o $output$reload_objs' + hardcode_direct=no + ;; + motorola) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + hardcode_shlibpath_var=no + ;; + + sysv4.3*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + export_dynamic_flag_spec='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + ld_shlibs=yes + fi + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7*) + no_undefined_flag='${wl}-z,text' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+ no_undefined_flag='${wl}-z,text' + allow_undefined_flag='${wl}-z,nodefs' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no + hardcode_libdir_flag_spec='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`' + hardcode_libdir_separator=':' + link_all_deplibs=yes + export_dynamic_flag_spec='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + archive_cmds='$CC -shared ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$CC -G ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + uts4*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; + + *) + ld_shlibs=no + ;; + esac + fi + +{ echo "$as_me:$LINENO: result: $ld_shlibs" >&5 +echo "${ECHO_T}$ld_shlibs" >&6; } +test "$ld_shlibs" = no && can_build_shared=no + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $archive_cmds in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + { echo "$as_me:$LINENO: checking whether -lc should be explicitly linked in" >&5 +echo $ECHO_N "checking whether -lc should be explicitly linked in... $ECHO_C" >&6; } + $rm conftest* + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl + pic_flag=$lt_prog_compiler_pic + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag + allow_undefined_flag= + if { (eval echo "$as_me:$LINENO: \"$archive_cmds 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1\"") >&5 + (eval $archive_cmds 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + then + archive_cmds_need_lc=no + else + archive_cmds_need_lc=yes + fi + allow_undefined_flag=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $rm conftest* + { echo "$as_me:$LINENO: result: $archive_cmds_need_lc" >&5 +echo "${ECHO_T}$archive_cmds_need_lc" >&6; } + ;; + esac + fi + ;; +esac + +{ echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 +echo $ECHO_N "checking dynamic linker characteristics... 
$ECHO_C" >&6; } +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';' >/dev/null ; then + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix4* | aix5*) + version_type=linux + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. 
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$host_os in + yes,cygwin* | yes,mingw* | yes,pw32*) + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i;echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $rm \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" + ;; + mingw*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH printed by + # mingw gcc, but we are running on Cygwin. Gcc prints its search + # path with ; separators, and with drive letters. We can handle the + # drive letters (cygwin fileutils understands them), so leave them, + # especially as we might pass files found there to a mingw objdump, + # which wouldn't understand a cygwinified path. Ahh. 
+ sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac + ;; + + linux*) + if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + supports_anon_versioning=no + case `$LD -v 2>/dev/null` in + *\ 01.* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + if test $supports_anon_versioning = yes; then + archive_expsym_cmds='$echo "{ global:" > $output_objdir/$libname.ver~ +cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ +$echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + else + $archive_expsym_cmds="$archive_cmds" + fi + else + ld_shlibs=no + fi + ;; + + *) + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' + ;; + esac + dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + # Apple's gcc prints 'gcc -print-search-dirs' doesn't operate the same. + if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + else + sys_lib_search_path_spec='/lib /usr/lib /usr/local/lib' + fi + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd1*) + dynamic_linker=no + ;; + +kfreebsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. 
+ if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[123]*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + # Handle Gentoo/FreeBSD as it was Linux + case $host_vendor in + gentoo) + version_type=linux ;; + *) + version_type=freebsd-$objformat ;; + esac + + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + linux) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + need_lib_prefix=no + need_version=no + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ + freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + freebsd*) # from 4.6 on + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555. 
+ postinstall_cmds='chmod 555 $lib' + ;; + +interix3*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be Linux ELF. +linux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. 
+ dynamic_linker='GNU/Linux ld.so' + ;; + +knetbsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +nto-qnx*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +openbsd*) + version_type=sunos + sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no + # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
+ case $host_os in + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; + esac + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[89] | openbsd2.[89].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +solaris*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + export_dynamic_flag_spec='${wl}-Blargedynsym' + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=freebsd-elf + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + if test "$with_gnu_ld" = yes; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + shlibpath_overrides_runpath=no + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + shlibpath_overrides_runpath=yes + case $host_os in + 
sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +uts4*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +{ echo "$as_me:$LINENO: result: $dynamic_linker" >&5 +echo "${ECHO_T}$dynamic_linker" >&6; } +test "$dynamic_linker" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +{ echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 +echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6; } +hardcode_action= +if test -n "$hardcode_libdir_flag_spec" || \ + test -n "$runpath_var" || \ + test "X$hardcode_automatic" = "Xyes" ; then + + # We can hardcode non-existant directories. + if test "$hardcode_direct" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, )" != no && + test "$hardcode_minus_L" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + hardcode_action=unsupported +fi +{ echo "$as_me:$LINENO: result: $hardcode_action" >&5 +echo "${ECHO_T}$hardcode_action" >&6; } + +if test "$hardcode_action" = relink; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi + +striplib= +old_striplib= +{ echo "$as_me:$LINENO: checking whether stripping libraries is possible" >&5 +echo $ECHO_N "checking whether stripping libraries is possible... 
$ECHO_C" >&6; } +if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + { echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6; } +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP" ; then + striplib="$STRIP -x" + { echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6; } + else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + ;; + *) + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } + ;; + esac +fi + +if test "x$enable_dlopen" != xyes; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in + beos*) + lt_cv_dlopen="load_add_on" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32*) + lt_cv_dlopen="LoadLibrary" + lt_cv_dlopen_libs= + ;; + + cygwin*) + lt_cv_dlopen="dlopen" + lt_cv_dlopen_libs= + ;; + + darwin*) + # if libdl is installed we need to link against it + { echo "$as_me:$LINENO: checking for dlopen in -ldl" >&5 +echo $ECHO_N "checking for dlopen in -ldl... $ECHO_C" >&6; } +if test "${ac_cv_lib_dl_dlopen+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + ac_cv_lib_dl_dlopen=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_lib_dl_dlopen=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 +echo "${ECHO_T}$ac_cv_lib_dl_dlopen" >&6; } +if test $ac_cv_lib_dl_dlopen = yes; then + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" +else + + lt_cv_dlopen="dyld" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + +fi + + ;; + + *) + { echo "$as_me:$LINENO: checking for shl_load" >&5 +echo $ECHO_N "checking for shl_load... $ECHO_C" >&6; } +if test "${ac_cv_func_shl_load+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define shl_load to an innocuous variant, in case declares shl_load. + For example, HP-UX 11i declares gettimeofday. 
*/ +#define shl_load innocuous_shl_load + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char shl_load (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef shl_load + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char shl_load (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined __stub_shl_load || defined __stub___shl_load +choke me +#endif + +int +main () +{ +return shl_load (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + ac_cv_func_shl_load=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_func_shl_load=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +fi +{ echo "$as_me:$LINENO: result: $ac_cv_func_shl_load" >&5 +echo "${ECHO_T}$ac_cv_func_shl_load" >&6; } +if test $ac_cv_func_shl_load = yes; then + lt_cv_dlopen="shl_load" +else + { echo "$as_me:$LINENO: checking for shl_load in -ldld" >&5 +echo $ECHO_N "checking for shl_load in -ldld... $ECHO_C" >&6; } +if test "${ac_cv_lib_dld_shl_load+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char shl_load (); +int +main () +{ +return shl_load (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + ac_cv_lib_dld_shl_load=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_lib_dld_shl_load=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ echo "$as_me:$LINENO: result: $ac_cv_lib_dld_shl_load" >&5 +echo "${ECHO_T}$ac_cv_lib_dld_shl_load" >&6; } +if test $ac_cv_lib_dld_shl_load = yes; then + lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-dld" +else + { echo "$as_me:$LINENO: checking for dlopen" >&5 +echo $ECHO_N "checking for dlopen... $ECHO_C" >&6; } +if test "${ac_cv_func_dlopen+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define dlopen to an innocuous variant, in case declares dlopen. + For example, HP-UX 11i declares gettimeofday. */ +#define dlopen innocuous_dlopen + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char dlopen (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef dlopen + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined __stub_dlopen || defined __stub___dlopen +choke me +#endif + +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + ac_cv_func_dlopen=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_func_dlopen=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +fi +{ echo "$as_me:$LINENO: result: $ac_cv_func_dlopen" >&5 +echo "${ECHO_T}$ac_cv_func_dlopen" >&6; } +if test $ac_cv_func_dlopen = yes; then + lt_cv_dlopen="dlopen" +else + { echo "$as_me:$LINENO: checking for dlopen in -ldl" >&5 +echo $ECHO_N "checking for dlopen in -ldl... $ECHO_C" >&6; } +if test "${ac_cv_lib_dl_dlopen+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + ac_cv_lib_dl_dlopen=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_lib_dl_dlopen=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 +echo "${ECHO_T}$ac_cv_lib_dl_dlopen" >&6; } +if test $ac_cv_lib_dl_dlopen = yes; then + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" +else + { echo "$as_me:$LINENO: checking for dlopen in -lsvld" >&5 +echo $ECHO_N "checking for dlopen in -lsvld... $ECHO_C" >&6; } +if test "${ac_cv_lib_svld_dlopen+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsvld $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + ac_cv_lib_svld_dlopen=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_lib_svld_dlopen=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ echo "$as_me:$LINENO: result: $ac_cv_lib_svld_dlopen" >&5 +echo "${ECHO_T}$ac_cv_lib_svld_dlopen" >&6; } +if test $ac_cv_lib_svld_dlopen = yes; then + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld" +else + { echo "$as_me:$LINENO: checking for dld_link in -ldld" >&5 +echo $ECHO_N "checking for dld_link in -ldld... $ECHO_C" >&6; } +if test "${ac_cv_lib_dld_dld_link+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. 
+ Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dld_link (); +int +main () +{ +return dld_link (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + ac_cv_lib_dld_dld_link=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_lib_dld_dld_link=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ echo "$as_me:$LINENO: result: $ac_cv_lib_dld_dld_link" >&5 +echo "${ECHO_T}$ac_cv_lib_dld_dld_link" >&6; } +if test $ac_cv_lib_dld_dld_link = yes; then + lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-dld" +fi + + +fi + + +fi + + +fi + + +fi + + +fi + + ;; + esac + + if test "x$lt_cv_dlopen" != xno; then + enable_dlopen=yes + else + enable_dlopen=no + fi + + case $lt_cv_dlopen in + dlopen) + save_CPPFLAGS="$CPPFLAGS" + test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" + + save_LDFLAGS="$LDFLAGS" + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" + + save_LIBS="$LIBS" + LIBS="$lt_cv_dlopen_libs $LIBS" + + { echo "$as_me:$LINENO: checking whether a program can dlopen itself" >&5 +echo $ECHO_N "checking whether a program can dlopen itself... $ECHO_C" >&6; } +if test "${lt_cv_dlopen_self+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test "$cross_compiling" = yes; then : + lt_cv_dlopen_self=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext < +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +#ifdef __cplusplus +extern "C" void exit (int); +#endif + +void fnord() { int i=42;} +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + /* dlclose (self); */ + } + else + puts (dlerror ()); + + exit (status); +} +EOF + if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) >&5 2>/dev/null + lt_status=$? 
+ case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; + x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self=no + fi +fi +rm -fr conftest* + + +fi +{ echo "$as_me:$LINENO: result: $lt_cv_dlopen_self" >&5 +echo "${ECHO_T}$lt_cv_dlopen_self" >&6; } + + if test "x$lt_cv_dlopen_self" = xyes; then + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" + { echo "$as_me:$LINENO: checking whether a statically linked program can dlopen itself" >&5 +echo $ECHO_N "checking whether a statically linked program can dlopen itself... $ECHO_C" >&6; } +if test "${lt_cv_dlopen_self_static+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test "$cross_compiling" = yes; then : + lt_cv_dlopen_self_static=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext < +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +#ifdef __cplusplus +extern "C" void exit (int); +#endif + +void fnord() { int i=42;} +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + /* dlclose (self); */ + } + else + puts (dlerror ()); + + exit (status); +} +EOF + if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) >&5 2>/dev/null + lt_status=$? + case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self_static=no + fi +fi +rm -fr conftest* + + +fi +{ echo "$as_me:$LINENO: result: $lt_cv_dlopen_self_static" >&5 +echo "${ECHO_T}$lt_cv_dlopen_self_static" >&6; } + fi + + CPPFLAGS="$save_CPPFLAGS" + LDFLAGS="$save_LDFLAGS" + LIBS="$save_LIBS" + ;; + esac + + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac + + case $lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac +fi + + +# Report which library types will actually be built +{ echo "$as_me:$LINENO: checking if libtool supports shared libraries" >&5 +echo $ECHO_N "checking if libtool supports shared libraries... $ECHO_C" >&6; } +{ echo "$as_me:$LINENO: result: $can_build_shared" >&5 +echo "${ECHO_T}$can_build_shared" >&6; } + +{ echo "$as_me:$LINENO: checking whether to build shared libraries" >&5 +echo $ECHO_N "checking whether to build shared libraries... 
$ECHO_C" >&6; } +test "$can_build_shared" = "no" && enable_shared=no + +# On AIX, shared libraries and static libraries use the same namespace, and +# are all built from PIC. +case $host_os in +aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + +aix4* | aix5*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; +esac +{ echo "$as_me:$LINENO: result: $enable_shared" >&5 +echo "${ECHO_T}$enable_shared" >&6; } + +{ echo "$as_me:$LINENO: checking whether to build static libraries" >&5 +echo $ECHO_N "checking whether to build static libraries... $ECHO_C" >&6; } +# Make sure either enable_shared or enable_static is yes. +test "$enable_shared" = yes || enable_static=yes +{ echo "$as_me:$LINENO: result: $enable_static" >&5 +echo "${ECHO_T}$enable_static" >&6; } + +# The else clause should only fire when bootstrapping the +# libtool distribution, otherwise you forgot to ship ltmain.sh +# with your package, and you will get complaints that there are +# no rules to generate ltmain.sh. +if test -f "$ltmain"; then + # See if we are running on zsh, and set the options which allow our commands through + # without removal of \ escapes. + if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + # Now quote all the things that may contain metacharacters while being + # careful not to overquote the AC_SUBSTed values. We take copies of the + # variables and quote the copies for generation of the libtool script. + for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC LTCFLAGS NM \ + SED SHELL STRIP \ + libname_spec library_names_spec soname_spec extract_expsyms_cmds \ + old_striplib striplib file_magic_cmd finish_cmds finish_eval \ + deplibs_check_method reload_flag reload_cmds need_locks \ + lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl \ + lt_cv_sys_global_symbol_to_c_name_address \ + sys_lib_search_path_spec sys_lib_dlsearch_path_spec \ + old_postinstall_cmds old_postuninstall_cmds \ + compiler \ + CC \ + LD \ + lt_prog_compiler_wl \ + lt_prog_compiler_pic \ + lt_prog_compiler_static \ + lt_prog_compiler_no_builtin_flag \ + export_dynamic_flag_spec \ + thread_safe_flag_spec \ + whole_archive_flag_spec \ + enable_shared_with_static_runtimes \ + old_archive_cmds \ + old_archive_from_new_cmds \ + predep_objects \ + postdep_objects \ + predeps \ + postdeps \ + compiler_lib_search_path \ + archive_cmds \ + archive_expsym_cmds \ + postinstall_cmds \ + postuninstall_cmds \ + old_archive_from_expsyms_cmds \ + allow_undefined_flag \ + no_undefined_flag \ + export_symbols_cmds \ + hardcode_libdir_flag_spec \ + hardcode_libdir_flag_spec_ld \ + hardcode_libdir_separator \ + hardcode_automatic \ + module_cmds \ + module_expsym_cmds \ + lt_cv_prog_compiler_c_o \ + exclude_expsyms \ + include_expsyms; do + + case $var in + old_archive_cmds | \ + old_archive_from_new_cmds | \ + archive_cmds | \ + archive_expsym_cmds | \ + module_cmds | \ + module_expsym_cmds | \ + old_archive_from_expsyms_cmds | \ + export_symbols_cmds | \ + extract_expsyms_cmds | reload_cmds | finish_cmds | \ + postinstall_cmds | postuninstall_cmds | \ + old_postinstall_cmds | old_postuninstall_cmds | \ + sys_lib_search_path_spec | sys_lib_dlsearch_path_spec) + # Double-quote double-evaled strings. 
+ eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" + ;; + *) + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\"" + ;; + esac + done + + case $lt_echo in + *'\$0 --fallback-echo"') + lt_echo=`$echo "X$lt_echo" | $Xsed -e 's/\\\\\\\$0 --fallback-echo"$/$0 --fallback-echo"/'` + ;; + esac + +cfgfile="${ofile}T" + trap "$rm \"$cfgfile\"; exit 1" 1 2 15 + $rm -f "$cfgfile" + { echo "$as_me:$LINENO: creating $ofile" >&5 +echo "$as_me: creating $ofile" >&6;} + + cat <<__EOF__ >> "$cfgfile" +#! $SHELL + +# `$echo "$cfgfile" | sed 's%^.*/%%'` - Provide generalized library-building support services. +# Generated automatically by $PROGRAM (GNU $PACKAGE $VERSION$TIMESTAMP) +# NOTE: Changes made to this file will be lost: look at ltmain.sh. +# +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001 +# Free Software Foundation, Inc. +# +# This file is part of GNU Libtool: +# Originally by Gordon Matzigkeit , 1996 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# A sed program that does not truncate output. +SED=$lt_SED + +# Sed that helps us avoid accidentally triggering echo(1) options like -n. +Xsed="$SED -e 1s/^X//" + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +# The names of the tagged configurations supported by this script. +available_tags= + +# ### BEGIN LIBTOOL CONFIG + +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc + +# Whether or not to disallow shared libs when runtime libs are static +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# An echo program that does not interpret backslashes. +echo=$lt_echo + +# The archiver. +AR=$lt_AR +AR_FLAGS=$lt_AR_FLAGS + +# A C compiler. +LTCC=$lt_LTCC + +# LTCC compiler flags. +LTCFLAGS=$lt_LTCFLAGS + +# A language-specific compiler. 
+CC=$lt_compiler + +# Is the compiler the GNU C compiler? +with_gcc=$GCC + +# An ERE matcher. +EGREP=$lt_EGREP + +# The linker used to build libraries. +LD=$lt_LD + +# Whether we need hard or soft links. +LN_S=$lt_LN_S + +# A BSD-compatible nm program. +NM=$lt_NM + +# A symbol stripping program +STRIP=$lt_STRIP + +# Used to examine libraries when file_magic_cmd begins "file" +MAGIC_CMD=$MAGIC_CMD + +# Used on cygwin: DLL creation program. +DLLTOOL="$DLLTOOL" + +# Used on cygwin: object dumper. +OBJDUMP="$OBJDUMP" + +# Used on cygwin: assembler. +AS="$AS" + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl + +# Object file suffix (normally "o"). +objext="$ac_objext" + +# Old archive suffix (normally "a"). +libext="$libext" + +# Shared library suffix (normally ".so"). +shrext_cmds='$shrext_cmds' + +# Executable file suffix (normally ""). +exeext="$exeext" + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic +pic_mode=$pic_mode + +# What is the maximum length of a command? +max_cmd_len=$lt_cv_sys_max_cmd_len + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Do we need the lib prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec + +# Compiler flag to generate thread-safe objects. +thread_safe_flag_spec=$lt_thread_safe_flag_spec + +# Library versioning type. +version_type=$version_type + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME. +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Commands used to build and install an old-style archive. +RANLIB=$lt_RANLIB +old_archive_cmds=$lt_old_archive_cmds +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds + +# Commands used to build and install a shared archive. 
+archive_cmds=$lt_archive_cmds +archive_expsym_cmds=$lt_archive_expsym_cmds +postinstall_cmds=$lt_postinstall_cmds +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to build a loadable module (assumed same as above if empty) +module_cmds=$lt_module_cmds +module_expsym_cmds=$lt_module_expsym_cmds + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + +# Dependencies to place before the objects being linked to create a +# shared library. +predep_objects=$lt_predep_objects + +# Dependencies to place after the objects being linked to create a +# shared library. +postdep_objects=$lt_postdep_objects + +# Dependencies to place before the objects being linked to create a +# shared library. +predeps=$lt_predeps + +# Dependencies to place after the objects being linked to create a +# shared library. +postdeps=$lt_postdeps + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method == file_magic. +file_magic_cmd=$lt_file_magic_cmd + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag + +# Flag that forces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# Same as above, but a single script fragment to be evaled but not shown. +finish_eval=$lt_finish_eval + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm in a C name address pair +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# This is the shared library runtime path variable. +runpath_var=$runpath_var + +# This is the shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist. +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec + +# If ld is used when linking, flag to hardcode \$libdir into +# a binary during linking. This must work even if \$libdir does +# not exist. +hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld + +# Whether we need a single -rpath flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator + +# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the +# resulting binary. +hardcode_direct=$hardcode_direct + +# Set to yes if using the -LDIR flag during linking hardcodes DIR into the +# resulting binary. +hardcode_minus_L=$hardcode_minus_L + +# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into +# the resulting binary. 
+hardcode_shlibpath_var=$hardcode_shlibpath_var + +# Set to yes if building a shared library automatically hardcodes DIR into the library +# and all subsequent libraries and executables linked against it. +hardcode_automatic=$hardcode_automatic + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at relink time. +variables_saved_for_relink="$variables_saved_for_relink" + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs + +# Compile-time system search path for libraries +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Run-time system search path for libraries +sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + +# Fix the shell variable \$srcfile for the compiler. +fix_srcfile_path="$fix_srcfile_path" + +# Set to yes if exported symbols are required. +always_export_symbols=$always_export_symbols + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms + +# ### END LIBTOOL CONFIG + +__EOF__ + + + case $host_os in + aix3*) + cat <<\EOF >> "$cfgfile" + +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +EOF + ;; + esac + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? + sed '$q' "$ltmain" >> "$cfgfile" || (rm -f "$cfgfile"; exit 1) + + mv -f "$cfgfile" "$ofile" || \ + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + +else + # If there is no Makefile yet, we rely on a make rule to execute + # `config.status --recheck' to rerun these tests and create the + # libtool script then. + ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` + if test -f "$ltmain_in"; then + test -f Makefile && make "$ltmain" + fi +fi + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +CC="$lt_save_CC" + + +# Check whether --with-tags was given. +if test "${with_tags+set}" = set; then + withval=$with_tags; tagnames="$withval" +fi + + +if test -f "$ltmain" && test -n "$tagnames"; then + if test ! 
-f "${ofile}"; then + { echo "$as_me:$LINENO: WARNING: output file \`$ofile' does not exist" >&5 +echo "$as_me: WARNING: output file \`$ofile' does not exist" >&2;} + fi + + if test -z "$LTCC"; then + eval "`$SHELL ${ofile} --config | grep '^LTCC='`" + if test -z "$LTCC"; then + { echo "$as_me:$LINENO: WARNING: output file \`$ofile' does not look like a libtool script" >&5 +echo "$as_me: WARNING: output file \`$ofile' does not look like a libtool script" >&2;} + else + { echo "$as_me:$LINENO: WARNING: using \`LTCC=$LTCC', extracted from \`$ofile'" >&5 +echo "$as_me: WARNING: using \`LTCC=$LTCC', extracted from \`$ofile'" >&2;} + fi + fi + if test -z "$LTCFLAGS"; then + eval "`$SHELL ${ofile} --config | grep '^LTCFLAGS='`" + fi + + # Extract list of available tagged configurations in $ofile. + # Note that this assumes the entire list is on one line. + available_tags=`grep "^available_tags=" "${ofile}" | $SED -e 's/available_tags=\(.*$\)/\1/' -e 's/\"//g'` + + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for tagname in $tagnames; do + IFS="$lt_save_ifs" + # Check whether tagname contains only valid characters + case `$echo "X$tagname" | $Xsed -e 's:[-_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890,/]::g'` in + "") ;; + *) { { echo "$as_me:$LINENO: error: invalid tag name: $tagname" >&5 +echo "$as_me: error: invalid tag name: $tagname" >&2;} + { (exit 1); exit 1; }; } + ;; + esac + + if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$" < "${ofile}" > /dev/null + then + { { echo "$as_me:$LINENO: error: tag name \"$tagname\" already exists" >&5 +echo "$as_me: error: tag name \"$tagname\" already exists" >&2;} + { (exit 1); exit 1; }; } + fi + + # Update the list of available tags. + if test -n "$tagname"; then + echo appending configuration tag \"$tagname\" to $ofile + + case $tagname in + CXX) + if test -n "$CXX" && ( test "X$CXX" != "Xno" && + ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || + (test "X$CXX" != "Xg++"))) ; then + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + + + +archive_cmds_need_lc_CXX=no +allow_undefined_flag_CXX= +always_export_symbols_CXX=no +archive_expsym_cmds_CXX= +export_dynamic_flag_spec_CXX= +hardcode_direct_CXX=no +hardcode_libdir_flag_spec_CXX= +hardcode_libdir_flag_spec_ld_CXX= +hardcode_libdir_separator_CXX= +hardcode_minus_L_CXX=no +hardcode_shlibpath_var_CXX=unsupported +hardcode_automatic_CXX=no +module_cmds_CXX= +module_expsym_cmds_CXX= +link_all_deplibs_CXX=unknown +old_archive_cmds_CXX=$old_archive_cmds +no_undefined_flag_CXX= +whole_archive_flag_spec_CXX= +enable_shared_with_static_runtimes_CXX=no + +# Dependencies to place before and after the object being linked: +predep_objects_CXX= +postdep_objects_CXX= +predeps_CXX= +postdeps_CXX= +compiler_lib_search_path_CXX= + +# Source file extension for C++ test sources. +ac_ext=cpp + +# Object file extension for compiled C++ test sources. +objext=o +objext_CXX=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;\n" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(int, char *[]) { return(0); }\n' + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. 
+LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +printf "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$rm conftest* + +ac_outfile=conftest.$ac_objext +printf "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$rm conftest* + + +# Allow CC to be a program name with arguments. +lt_save_CC=$CC +lt_save_LD=$LD +lt_save_GCC=$GCC +GCC=$GXX +lt_save_with_gnu_ld=$with_gnu_ld +lt_save_path_LD=$lt_cv_path_LD +if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then + lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx +else + $as_unset lt_cv_prog_gnu_ld +fi +if test -n "${lt_cv_path_LDCXX+set}"; then + lt_cv_path_LD=$lt_cv_path_LDCXX +else + $as_unset lt_cv_path_LD +fi +test -z "${LDCXX+set}" || LD=$LDCXX +CC=${CXX-"c++"} +compiler=$CC +compiler_CXX=$CC +for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + + +# We don't want -fno-exception wen compiling C++ code, so set the +# no_builtin_flag separately +if test "$GXX" = yes; then + lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin' +else + lt_prog_compiler_no_builtin_flag_CXX= +fi + +if test "$GXX" = yes; then + # Set up default GNU C++ configuration + + +# Check whether --with-gnu-ld was given. +if test "${with_gnu_ld+set}" = set; then + withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes +else + with_gnu_ld=no +fi + +ac_prog=ld +if test "$GCC" = yes; then + # Check if gcc -print-prog-name=ld gives a path. + { echo "$as_me:$LINENO: checking for ld used by $CC" >&5 +echo $ECHO_N "checking for ld used by $CC... $ECHO_C" >&6; } + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths. + [\\/]* | ?:[\\/]*) + re_direlt='/[^/][^/]*/\.\./' + # Canonicalize the pathname of ld + ac_prog=`echo $ac_prog| $SED 's%\\\\%/%g'` + while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do + ac_prog=`echo $ac_prog| $SED "s%$re_direlt%/%"` + done + test -z "$LD" && LD="$ac_prog" + ;; + "") + # If it fails, then pretend we aren't using GCC. + ac_prog=ld + ;; + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown + ;; + esac +elif test "$with_gnu_ld" = yes; then + { echo "$as_me:$LINENO: checking for GNU ld" >&5 +echo $ECHO_N "checking for GNU ld... $ECHO_C" >&6; } +else + { echo "$as_me:$LINENO: checking for non-GNU ld" >&5 +echo $ECHO_N "checking for non-GNU ld... $ECHO_C" >&6; } +fi +if test "${lt_cv_path_LD+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -z "$LD"; then + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + lt_cv_path_LD="$ac_dir/$ac_prog" + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some variants of GNU ld only accept -v. 
+ # Break only if it was the GNU/non-GNU ld that we prefer. + case `"$lt_cv_path_LD" -v 2>&1 &5 +echo "${ECHO_T}$LD" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi +test -z "$LD" && { { echo "$as_me:$LINENO: error: no acceptable ld found in \$PATH" >&5 +echo "$as_me: error: no acceptable ld found in \$PATH" >&2;} + { (exit 1); exit 1; }; } +{ echo "$as_me:$LINENO: checking if the linker ($LD) is GNU ld" >&5 +echo $ECHO_N "checking if the linker ($LD) is GNU ld... $ECHO_C" >&6; } +if test "${lt_cv_prog_gnu_ld+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + # I'd rather use --version here, but apparently some GNU lds only accept -v. +case `$LD -v 2>&1 &5 +echo "${ECHO_T}$lt_cv_prog_gnu_ld" >&6; } +with_gnu_ld=$lt_cv_prog_gnu_ld + + + + # Check if GNU C++ uses GNU ld as the underlying linker, since the + # archiving commands below assume that GNU ld is being used. + if test "$with_gnu_ld" = yes; then + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + + hardcode_libdir_flag_spec_CXX='${wl}--rpath ${wl}$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + + # If archive_cmds runs LD, not CC, wlarc should be empty + # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to + # investigate it a little bit more. (MM) + wlarc='${wl}' + + # ancient GNU ld didn't support --whole-archive et. al. + if eval "`$CC -print-prog-name=ld` --help 2>&1" | \ + grep 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + whole_archive_flag_spec_CXX= + fi + else + with_gnu_ld=no + wlarc= + + # A generic and very simple default shared library creation + # command for GNU C++ for the case where it uses the native + # linker, instead of GNU ld. If possible, this setting should + # overridden to take advantage of the native linker features on + # the platform it is being used on. + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + fi + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' + +else + GXX=no + with_gnu_ld=no + wlarc= +fi + +# PORTME: fill in a description of your system's C++ link characteristics +{ echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared libraries... $ECHO_C" >&6; } +ld_shlibs_CXX=yes +case $host_os in + aix3*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aix4* | aix5*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. 
+ case $host_os in aix4.[23]|aix4.[23].*|aix5*) + for ld_flag in $LDFLAGS; do + case $ld_flag in + *-brtl*) + aix_use_runtimelinking=yes + break + ;; + esac + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + archive_cmds_CXX='' + hardcode_direct_CXX=yes + hardcode_libdir_separator_CXX=':' + link_all_deplibs_CXX=yes + + if test "$GXX" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && \ + strings "$collect2name" | grep resolve_lib_name >/dev/null + then + # We have reworked collect2 + hardcode_direct_CXX=yes + else + # We have old collect2 + hardcode_direct_CXX=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L_CXX=yes + hardcode_libdir_flag_spec_CXX='-L$libdir' + hardcode_libdir_separator_CXX= + fi + ;; + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + always_export_symbols_CXX=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag_CXX='-berok' + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. 
+if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" + + archive_expsym_cmds_CXX="\$CC"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + hardcode_libdir_flag_spec_CXX='${wl}-R $libdir:/usr/lib:/lib' + allow_undefined_flag_CXX="-z nodefs" + archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. +if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag_CXX=' ${wl}-bernotok' + allow_undefined_flag_CXX=' ${wl}-berok' + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec_CXX='$convenience' + archive_cmds_need_lc_CXX=yes + # This is similar to how AIX traditionally builds its shared libraries. 
+ archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + beos*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag_CXX=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + ld_shlibs_CXX=no + fi + ;; + + chorus*) + case $cc_basename in + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + cygwin* | mingw* | pw32*) + # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, + # as there is no search path for DLLs. + hardcode_libdir_flag_spec_CXX='-L$libdir' + allow_undefined_flag_CXX=unsupported + always_export_symbols_CXX=no + enable_shared_with_static_runtimes_CXX=yes + + if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... + archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs_CXX=no + fi + ;; + darwin* | rhapsody*) + case $host_os in + rhapsody* | darwin1.[012]) + allow_undefined_flag_CXX='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + allow_undefined_flag_CXX='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[012]) + allow_undefined_flag_CXX='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + allow_undefined_flag_CXX='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; + esac + archive_cmds_need_lc_CXX=no + hardcode_direct_CXX=no + hardcode_automatic_CXX=yes + hardcode_shlibpath_var_CXX=unsupported + whole_archive_flag_spec_CXX='' + link_all_deplibs_CXX=yes + + if test "$GXX" = yes ; then + lt_int_apple_cc_single_mod=no + output_verbose_link_cmd='echo' + if $CC -dumpspecs 2>&1 | $EGREP 'single_module' >/dev/null ; then + lt_int_apple_cc_single_mod=yes + fi + if test "X$lt_int_apple_cc_single_mod" = Xyes ; then + archive_cmds_CXX='$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + else + archive_cmds_CXX='$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + fi + module_cmds_CXX='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + if test "X$lt_int_apple_cc_single_mod" = Xyes ; then + 
archive_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + archive_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + fi + module_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + case $cc_basename in + xlc*) + output_verbose_link_cmd='echo' + archive_cmds_CXX='$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + module_cmds_CXX='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + archive_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + ld_shlibs_CXX=no + ;; + esac + fi + ;; + + dgux*) + case $cc_basename in + ec++*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + ghcx*) + # Green Hills C++ Compiler + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + freebsd[12]*) + # C++ shared libraries reported to be fairly broken before switch to ELF + ld_shlibs_CXX=no + ;; + freebsd-elf*) + archive_cmds_need_lc_CXX=no + ;; + freebsd* | kfreebsd*-gnu | dragonfly*) + # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF + # conventions + ld_shlibs_CXX=yes + ;; + gnu*) + ;; + hpux9*) + hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_CXX=: + export_dynamic_flag_spec_CXX='${wl}-E' + hardcode_direct_CXX=yes + hardcode_minus_L_CXX=yes # Not in the search PATH, + # but as the default + # location of the library. + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aCC*) + archive_cmds_CXX='$rm $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | grep "[-]L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes; then + archive_cmds_CXX='$rm $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + hpux10*|hpux11*) + if test $with_gnu_ld = no; then + hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_CXX=: + + case $host_cpu in + hppa*64*|ia64*) + hardcode_libdir_flag_spec_ld_CXX='+b $libdir' + ;; + *) + export_dynamic_flag_spec_CXX='${wl}-E' + ;; + esac + fi + case $host_cpu in + hppa*64*|ia64*) + hardcode_direct_CXX=no + hardcode_shlibpath_var_CXX=no + ;; + *) + hardcode_direct_CXX=yes + hardcode_minus_L_CXX=yes # Not in the search PATH, + # but as the default + # location of the library. + ;; + esac + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aCC*) + case $host_cpu in + hppa*64*) + archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | grep "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes; then + if test $with_gnu_ld = no; then + case $host_cpu in + hppa*64*) + archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + fi + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + interix3*) + hardcode_direct_CXX=no + hardcode_shlibpath_var_CXX=no + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + export_dynamic_flag_spec_CXX='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. + archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + archive_expsym_cmds_CXX='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + irix5* | irix6*) + case $cc_basename in + CC*) + # SGI C++ + archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + + # Archives containing C++ object files must be created using + # "CC -ar", where "CC" is the IRIX C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + old_archive_cmds_CXX='$CC -ar -WR,-u -o $oldlib $oldobjs' + ;; + *) + if test "$GXX" = yes; then + if test "$with_gnu_ld" = no; then + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` -o $lib' + fi + fi + link_all_deplibs_CXX=yes + ;; + esac + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_CXX=: + ;; + linux*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. 
(KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | grep "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + + hardcode_libdir_flag_spec_CXX='${wl}--rpath,$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. + old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' + ;; + icpc*) + # Intel C++ + with_gnu_ld=yes + # version 8.0 and above of icpc choke on multiply defined symbols + # if we add $predep_objects and $postdep_objects, however 7.1 and + # earlier do not add the objects themselves. 
+ case `$CC -V 2>&1` in + *"Version 7."*) + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 8.0 or newer + tmp_idyn= + case $host_cpu in + ia64*) tmp_idyn=' -i_dynamic';; + esac + archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac + archive_cmds_need_lc_CXX=no + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + ;; + pgCC*) + # Portland Group C++ compiler + archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + + hardcode_libdir_flag_spec_CXX='${wl}--rpath ${wl}$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + whole_archive_flag_spec_CXX='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' + ;; + cxx*) + # Compaq C++ + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' + + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec_CXX='-rpath $libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + esac + ;; + lynxos*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + m88k*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + mvs*) + case $cc_basename in + cxx*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + archive_cmds_CXX='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' + wlarc= + hardcode_libdir_flag_spec_CXX='-R$libdir' + hardcode_direct_CXX=yes + hardcode_shlibpath_var_CXX=no + fi + # Workaround some broken pre-1.5 toolchains + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' + ;; + openbsd2*) + # C++ shared libraries are fairly broken + ld_shlibs_CXX=no + ;; + openbsd*) + hardcode_direct_CXX=yes + hardcode_shlibpath_var_CXX=no + archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' + export_dynamic_flag_spec_CXX='${wl}-E' + whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + fi + output_verbose_link_cmd='echo' + ;; + osf3*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + hardcode_libdir_separator_CXX=: + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. + old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' + + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + cxx*) + allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && echo ${wl}-set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld" | grep -v "ld:"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' + + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + osf4* | osf5*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + hardcode_libdir_separator_CXX=: + + # Archives containing C++ object files must be created using + # the KAI C++ compiler. + old_archive_cmds_CXX='$CC -o $oldlib $oldobjs' + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + cxx*) + allow_undefined_flag_CXX=' -expect_unresolved \*' + archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds_CXX='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ + echo "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname -Wl,-input -Wl,$lib.exp `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~ + $rm $lib.exp' + + hardcode_libdir_flag_spec_CXX='-rpath $libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld" | grep -v "ld:"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' + + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + psos*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + lcc*) + # Lucid + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + solaris*) + case $cc_basename in + CC*) + # Sun C++ 4.2, 5.x and Centerline C++ + archive_cmds_need_lc_CXX=yes + no_undefined_flag_CXX=' -zdefs' + archive_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + archive_expsym_cmds_CXX='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' + + hardcode_libdir_flag_spec_CXX='-R$libdir' + hardcode_shlibpath_var_CXX=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The C++ compiler is used as linker so we must use $wl + # flag to pass the commands to the underlying system + # linker. We must also pass each convience library through + # to the system linker between allextract/defaultextract. + # The C++ compiler will combine linker options so we + # cannot just pass the convience library names through + # without $wl. + # Supported since Solaris 2.6 (maybe 2.5.1?) + whole_archive_flag_spec_CXX='${wl}-z ${wl}allextract`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}-z ${wl}defaultextract' + ;; + esac + link_all_deplibs_CXX=yes + + output_verbose_link_cmd='echo' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' + ;; + gcx*) + # Green Hills C++ Compiler + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + + # The C++ compiler must be used to create the archive. 
+ old_archive_cmds_CXX='$CC $LDFLAGS -archive -o $oldlib $oldobjs' + ;; + *) + # GNU C++ compiler with Solaris linker + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + no_undefined_flag_CXX=' ${wl}-z ${wl}defs' + if $CC --version | grep -v '^2\.7' > /dev/null; then + archive_cmds_CXX='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + archive_expsym_cmds_CXX='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd="$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep \"\-L\"" + else + # g++ 2.7 appears to require `-G' NOT `-shared' on this + # platform. + archive_cmds_CXX='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + archive_expsym_cmds_CXX='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd="$CC -G $CFLAGS -v conftest.$objext 2>&1 | grep \"\-L\"" + fi + + hardcode_libdir_flag_spec_CXX='${wl}-R $wl$libdir' + fi + ;; + esac + ;; + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) + no_undefined_flag_CXX='${wl}-z,text' + archive_cmds_need_lc_CXX=no + hardcode_shlibpath_var_CXX=no + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + archive_cmds_CXX='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. + # For security reasons, it is highly recommended that you always + # use absolute paths for naming shared libraries, and exclude the + # DT_RUNPATH tag from executables and libraries. But doing so + # requires that you compile everything twice, which is a pain. + # So that behaviour is only enabled if SCOABSPATH is set to a + # non-empty value in the environment. Most likely only useful for + # creating official distributions of packages. + # This is a hack until libtool officially supports absolute path + # names for shared libraries. 
+	no_undefined_flag_CXX='${wl}-z,text'
+	allow_undefined_flag_CXX='${wl}-z,nodefs'
+	archive_cmds_need_lc_CXX=no
+	hardcode_shlibpath_var_CXX=no
+	hardcode_libdir_flag_spec_CXX='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`'
+	hardcode_libdir_separator_CXX=':'
+	link_all_deplibs_CXX=yes
+	export_dynamic_flag_spec_CXX='${wl}-Bexport'
+	runpath_var='LD_RUN_PATH'
+
+	case $cc_basename in
+          CC*)
+	    archive_cmds_CXX='$CC -G ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags'
+	    archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags'
+	    ;;
+	  *)
+	    archive_cmds_CXX='$CC -shared ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags'
+	    archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags'
+	    ;;
+	esac
+      ;;
+  tandem*)
+    case $cc_basename in
+      NCC*)
+	# NonStop-UX NCC 3.20
+	# FIXME: insert proper C++ library support
+	ld_shlibs_CXX=no
+	;;
+      *)
+	# FIXME: insert proper C++ library support
+	ld_shlibs_CXX=no
+	;;
+    esac
+    ;;
+  vxworks*)
+    # FIXME: insert proper C++ library support
+    ld_shlibs_CXX=no
+    ;;
+  *)
+    # FIXME: insert proper C++ library support
+    ld_shlibs_CXX=no
+    ;;
+esac
+{ echo "$as_me:$LINENO: result: $ld_shlibs_CXX" >&5
+echo "${ECHO_T}$ld_shlibs_CXX" >&6; }
+test "$ld_shlibs_CXX" = no && can_build_shared=no
+
+GCC_CXX="$GXX"
+LD_CXX="$LD"
+
+
+cat > conftest.$ac_ext <<EOF
+class Foo
+{
+public:
+  Foo (void) { a = 0; }
+private:
+  int a;
+};
+EOF
+
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+  (eval $ac_compile) 2>&5
+  ac_status=$?
+  echo "$as_me:$LINENO: \$? = $ac_status" >&5
+  (exit $ac_status); }; then
+  # Parse the compiler output and extract the necessary
+  # objects, libraries and library flags.
+
+  # Sentinel used to keep track of whether or not we are before
+  # the conftest object file.
+  pre_test_object_deps_done=no
+
+  # The `*' in the case matches for architectures that use `case' in
+  # $output_verbose_cmd can trigger glob expansion during the loop
+  # eval without this substitution.
+  output_verbose_link_cmd=`$echo "X$output_verbose_link_cmd" | $Xsed -e "$no_glob_subst"`
+
+  for p in `eval $output_verbose_link_cmd`; do
+    case $p in
+
+    -L* | -R* | -l*)
+       # Some compilers place space between "-{L,R}" and the path.
+       # Remove the space.
+       if test $p = "-L" \
+	  || test $p = "-R"; then
+	 prev=$p
+	 continue
+       else
+	 prev=
+       fi
+
+       if test "$pre_test_object_deps_done" = no; then
+	 case $p in
+	 -L* | -R*)
+	   # Internal compiler library paths should come after those
+	   # provided the user.  The postdeps already come after the
+	   # user supplied libs so there is no need to process them.
+	   if test -z "$compiler_lib_search_path_CXX"; then
+	     compiler_lib_search_path_CXX="${prev}${p}"
+	   else
+	     compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} ${prev}${p}"
+	   fi
+	   ;;
+	 # The "-l" case would never come before the object being
+	 # linked, so don't bother handling this case.
+	 esac
+       else
+	 if test -z "$postdeps_CXX"; then
+	   postdeps_CXX="${prev}${p}"
+	 else
+	   postdeps_CXX="${postdeps_CXX} ${prev}${p}"
+	 fi
+       fi
+       ;;
+
+    *.$objext)
+       # This assumes that the test object file only shows up
+       # once in the compiler output.
+ if test "$p" = "conftest.$objext"; then + pre_test_object_deps_done=yes + continue + fi + + if test "$pre_test_object_deps_done" = no; then + if test -z "$predep_objects_CXX"; then + predep_objects_CXX="$p" + else + predep_objects_CXX="$predep_objects_CXX $p" + fi + else + if test -z "$postdep_objects_CXX"; then + postdep_objects_CXX="$p" + else + postdep_objects_CXX="$postdep_objects_CXX $p" + fi + fi + ;; + + *) ;; # Ignore the rest. + + esac + done + + # Clean up. + rm -f a.out a.exe +else + echo "libtool.m4: error: problem compiling CXX test program" +fi + +$rm -f confest.$objext + +# PORTME: override above test on systems where it is broken +case $host_os in +interix3*) + # Interix 3.5 installs completely hosed .la files for C++, so rather than + # hack all around it, let's just trust "g++" to DTRT. + predep_objects_CXX= + postdep_objects_CXX= + postdeps_CXX= + ;; + +solaris*) + case $cc_basename in + CC*) + # Adding this requires a known-good setup of shared libraries for + # Sun compiler versions before 5.6, else PIC objects from an old + # archive will be linked into the output, leading to subtle bugs. + postdeps_CXX='-lCstd -lCrun' + ;; + esac + ;; +esac + + +case " $postdeps_CXX " in +*" -lc "*) archive_cmds_need_lc_CXX=no ;; +esac + +lt_prog_compiler_wl_CXX= +lt_prog_compiler_pic_CXX= +lt_prog_compiler_static_CXX= + +{ echo "$as_me:$LINENO: checking for $compiler option to produce PIC" >&5 +echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6; } + + # C++ specific cases for pic, static, wl, etc. + if test "$GXX" = yes; then + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_CXX='-Bstatic' + fi + ;; + amigaos*) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + lt_prog_compiler_pic_CXX='-m68020 -resident32 -malways-restore-a4' + ;; + beos* | cygwin* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + mingw* | os2* | pw32*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic_CXX='-DDLL_EXPORT' + ;; + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic_CXX='-fno-common' + ;; + *djgpp*) + # DJGPP does not support shared libraries at all + lt_prog_compiler_pic_CXX= + ;; + interix3*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic_CXX=-Kconform_pic + fi + ;; + hpux*) + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + ;; + *) + lt_prog_compiler_pic_CXX='-fPIC' + ;; + esac + ;; + *) + lt_prog_compiler_pic_CXX='-fPIC' + ;; + esac + else + case $host_os in + aix4* | aix5*) + # All AIX code is PIC. 
+ if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_CXX='-Bstatic' + else + lt_prog_compiler_static_CXX='-bnso -bI:/lib/syscalls.exp' + fi + ;; + chorus*) + case $cc_basename in + cxch68*) + # Green Hills C++ Compiler + # _LT_AC_TAGVAR(lt_prog_compiler_static, CXX)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" + ;; + esac + ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case $cc_basename in + xlc*) + lt_prog_compiler_pic_CXX='-qnocommon' + lt_prog_compiler_wl_CXX='-Wl,' + ;; + esac + ;; + dgux*) + case $cc_basename in + ec++*) + lt_prog_compiler_pic_CXX='-KPIC' + ;; + ghcx*) + # Green Hills C++ Compiler + lt_prog_compiler_pic_CXX='-pic' + ;; + *) + ;; + esac + ;; + freebsd* | kfreebsd*-gnu | dragonfly*) + # FreeBSD uses GNU C++ + ;; + hpux9* | hpux10* | hpux11*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='${wl}-a ${wl}archive' + if test "$host_cpu" != ia64; then + lt_prog_compiler_pic_CXX='+Z' + fi + ;; + aCC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='${wl}-a ${wl}archive' + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic_CXX='+Z' + ;; + esac + ;; + *) + ;; + esac + ;; + interix*) + # This is c89, which is MS Visual C++ (no shared libs) + # Anyone wants to do a port? + ;; + irix5* | irix6* | nonstopux*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='-non_shared' + # CC pic flag -KPIC is the default. + ;; + *) + ;; + esac + ;; + linux*) + case $cc_basename in + KCC*) + # KAI C++ Compiler + lt_prog_compiler_wl_CXX='--backend -Wl,' + lt_prog_compiler_pic_CXX='-fPIC' + ;; + icpc* | ecpc*) + # Intel C++ + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-static' + ;; + pgCC*) + # Portland Group C++ compiler. + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-fpic' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + cxx*) + # Compaq C++ + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + lt_prog_compiler_pic_CXX= + lt_prog_compiler_static_CXX='-non_shared' + ;; + *) + ;; + esac + ;; + lynxos*) + ;; + m88k*) + ;; + mvs*) + case $cc_basename in + cxx*) + lt_prog_compiler_pic_CXX='-W c,exportall' + ;; + *) + ;; + esac + ;; + netbsd*) + ;; + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + lt_prog_compiler_wl_CXX='--backend -Wl,' + ;; + RCC*) + # Rational C++ 2.4.1 + lt_prog_compiler_pic_CXX='-pic' + ;; + cxx*) + # Digital/Compaq C++ + lt_prog_compiler_wl_CXX='-Wl,' + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. 
+ lt_prog_compiler_pic_CXX= + lt_prog_compiler_static_CXX='-non_shared' + ;; + *) + ;; + esac + ;; + psos*) + ;; + solaris*) + case $cc_basename in + CC*) + # Sun C++ 4.2, 5.x and Centerline C++ + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' + lt_prog_compiler_wl_CXX='-Qoption ld ' + ;; + gcx*) + # Green Hills C++ Compiler + lt_prog_compiler_pic_CXX='-PIC' + ;; + *) + ;; + esac + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + lt_prog_compiler_pic_CXX='-pic' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + lcc*) + # Lucid + lt_prog_compiler_pic_CXX='-pic' + ;; + *) + ;; + esac + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + lt_prog_compiler_pic_CXX='-KPIC' + ;; + *) + ;; + esac + ;; + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + esac + ;; + vxworks*) + ;; + *) + lt_prog_compiler_can_build_shared_CXX=no + ;; + esac + fi + +{ echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_CXX" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic_CXX" >&6; } + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$lt_prog_compiler_pic_CXX"; then + +{ echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5 +echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... $ECHO_C" >&6; } +if test "${lt_prog_compiler_pic_works_CXX+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_prog_compiler_pic_works_CXX=no + ac_outfile=conftest.$ac_objext + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:11517: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:11521: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_prog_compiler_pic_works_CXX=yes + fi + fi + $rm conftest* + +fi +{ echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_works_CXX" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic_works_CXX" >&6; } + +if test x"$lt_prog_compiler_pic_works_CXX" = xyes; then + case $lt_prog_compiler_pic_CXX in + "" | " "*) ;; + *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;; + esac +else + lt_prog_compiler_pic_CXX= + lt_prog_compiler_can_build_shared_CXX=no +fi + +fi +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic_CXX= + ;; + *) + lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC" + ;; +esac + +# +# Check to make sure the static flag actually works. +# +wl=$lt_prog_compiler_wl_CXX eval lt_tmp_static_flag=\"$lt_prog_compiler_static_CXX\" +{ echo "$as_me:$LINENO: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +echo $ECHO_N "checking if $compiler static flag $lt_tmp_static_flag works... $ECHO_C" >&6; } +if test "${lt_prog_compiler_static_works_CXX+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_prog_compiler_static_works_CXX=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + printf "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&5 + $echo "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_prog_compiler_static_works_CXX=yes + fi + else + lt_prog_compiler_static_works_CXX=yes + fi + fi + $rm conftest* + LDFLAGS="$save_LDFLAGS" + +fi +{ echo "$as_me:$LINENO: result: $lt_prog_compiler_static_works_CXX" >&5 +echo "${ECHO_T}$lt_prog_compiler_static_works_CXX" >&6; } + +if test x"$lt_prog_compiler_static_works_CXX" = xyes; then + : +else + lt_prog_compiler_static_CXX= +fi + + +{ echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 +echo $ECHO_N "checking if $compiler supports -c -o file.$ac_objext... $ECHO_C" >&6; } +if test "${lt_cv_prog_compiler_c_o_CXX+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_prog_compiler_c_o_CXX=no + $rm -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:11621: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:11625: \$? 
= $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o_CXX=yes + fi + fi + chmod u+w . 2>&5 + $rm conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files + $rm out/* && rmdir out + cd .. + rmdir conftest + $rm conftest* + +fi +{ echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o_CXX" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_c_o_CXX" >&6; } + + +hard_links="nottested" +if test "$lt_cv_prog_compiler_c_o_CXX" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + { echo "$as_me:$LINENO: checking if we can lock with hard links" >&5 +echo $ECHO_N "checking if we can lock with hard links... $ECHO_C" >&6; } + hard_links=yes + $rm conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + { echo "$as_me:$LINENO: result: $hard_links" >&5 +echo "${ECHO_T}$hard_links" >&6; } + if test "$hard_links" = no; then + { echo "$as_me:$LINENO: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 +echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + +{ echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared libraries... $ECHO_C" >&6; } + + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + case $host_os in + aix4* | aix5*) + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + if $NM -V 2>&1 | grep 'GNU' > /dev/null; then + export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds_CXX='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + fi + ;; + pw32*) + export_symbols_cmds_CXX="$ltdll_cmds" + ;; + cygwin* | mingw*) + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS] /s/.* \([^ ]*\)/\1 DATA/;/^.* __nm__/s/^.* __nm__\([^ ]*\) [^ ]*/\1 DATA/;/^I /d;/^[AITW] /s/.* //'\'' | sort | uniq > $export_symbols' + ;; + *) + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + ;; + esac + +{ echo "$as_me:$LINENO: result: $ld_shlibs_CXX" >&5 +echo "${ECHO_T}$ld_shlibs_CXX" >&6; } +test "$ld_shlibs_CXX" = no && can_build_shared=no + +# +# Do we need to explicitly link libc? 
+# +case "x$archive_cmds_need_lc_CXX" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc_CXX=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $archive_cmds_CXX in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + { echo "$as_me:$LINENO: checking whether -lc should be explicitly linked in" >&5 +echo $ECHO_N "checking whether -lc should be explicitly linked in... $ECHO_C" >&6; } + $rm conftest* + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl_CXX + pic_flag=$lt_prog_compiler_pic_CXX + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag_CXX + allow_undefined_flag_CXX= + if { (eval echo "$as_me:$LINENO: \"$archive_cmds_CXX 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1\"") >&5 + (eval $archive_cmds_CXX 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + then + archive_cmds_need_lc_CXX=no + else + archive_cmds_need_lc_CXX=yes + fi + allow_undefined_flag_CXX=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $rm conftest* + { echo "$as_me:$LINENO: result: $archive_cmds_need_lc_CXX" >&5 +echo "${ECHO_T}$archive_cmds_need_lc_CXX" >&6; } + ;; + esac + fi + ;; +esac + +{ echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 +echo $ECHO_N "checking dynamic linker characteristics... $ECHO_C" >&6; } +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';' >/dev/null ; then + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. 
+ soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix4* | aix5*) + version_type=linux + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$host_os in + yes,cygwin* | yes,mingw* | yes,pw32*) + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. 
$dir/'\''\${base_file}'\''i;echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $rm \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" + ;; + mingw*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH printed by + # mingw gcc, but we are running on Cygwin. Gcc prints its search + # path with ; separators, and with drive letters. We can handle the + # drive letters (cygwin fileutils understands them), so leave them, + # especially as we might pass files found there to a mingw objdump, + # which wouldn't understand a cygwinified path. Ahh. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac + ;; + + linux*) + if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + supports_anon_versioning=no + case `$LD -v 2>/dev/null` in + *\ 01.* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + if test $supports_anon_versioning = yes; then + archive_expsym_cmds='$echo "{ global:" > $output_objdir/$libname.ver~ +cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ +$echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + else + $archive_expsym_cmds="$archive_cmds" + fi + else + ld_shlibs=no + fi + ;; + + *) + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' + ;; + esac + dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + # Apple's gcc prints 'gcc -print-search-dirs' doesn't operate the same. 
+ if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + else + sys_lib_search_path_spec='/lib /usr/lib /usr/local/lib' + fi + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd1*) + dynamic_linker=no + ;; + +kfreebsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. + if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[123]*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + # Handle Gentoo/FreeBSD as it was Linux + case $host_vendor in + gentoo) + version_type=linux ;; + *) + version_type=freebsd-$objformat ;; + esac + + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + linux) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + need_lib_prefix=no + need_version=no + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ + freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + freebsd*) # from 4.6 on + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555. + postinstall_cmds='chmod 555 $lib' + ;; + +interix3*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be Linux ELF. 
+linux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. + dynamic_linker='GNU/Linux ld.so' + ;; + +knetbsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +nto-qnx*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +openbsd*) + version_type=sunos + sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no + # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
+ case $host_os in + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; + esac + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[89] | openbsd2.[89].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +solaris*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + export_dynamic_flag_spec='${wl}-Blargedynsym' + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=freebsd-elf + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + if test "$with_gnu_ld" = yes; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + shlibpath_overrides_runpath=no + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + shlibpath_overrides_runpath=yes + case $host_os in + 
sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +uts4*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +{ echo "$as_me:$LINENO: result: $dynamic_linker" >&5 +echo "${ECHO_T}$dynamic_linker" >&6; } +test "$dynamic_linker" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +{ echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 +echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6; } +hardcode_action_CXX= +if test -n "$hardcode_libdir_flag_spec_CXX" || \ + test -n "$runpath_var_CXX" || \ + test "X$hardcode_automatic_CXX" = "Xyes" ; then + + # We can hardcode non-existant directories. + if test "$hardcode_direct_CXX" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, CXX)" != no && + test "$hardcode_minus_L_CXX" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action_CXX=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action_CXX=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + hardcode_action_CXX=unsupported +fi +{ echo "$as_me:$LINENO: result: $hardcode_action_CXX" >&5 +echo "${ECHO_T}$hardcode_action_CXX" >&6; } + +if test "$hardcode_action_CXX" = relink; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi + + +# The else clause should only fire when bootstrapping the +# libtool distribution, otherwise you forgot to ship ltmain.sh +# with your package, and you will get complaints that there are +# no rules to generate ltmain.sh. +if test -f "$ltmain"; then + # See if we are running on zsh, and set the options which allow our commands through + # without removal of \ escapes. + if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + # Now quote all the things that may contain metacharacters while being + # careful not to overquote the AC_SUBSTed values. We take copies of the + # variables and quote the copies for generation of the libtool script. 
+ for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC LTCFLAGS NM \ + SED SHELL STRIP \ + libname_spec library_names_spec soname_spec extract_expsyms_cmds \ + old_striplib striplib file_magic_cmd finish_cmds finish_eval \ + deplibs_check_method reload_flag reload_cmds need_locks \ + lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl \ + lt_cv_sys_global_symbol_to_c_name_address \ + sys_lib_search_path_spec sys_lib_dlsearch_path_spec \ + old_postinstall_cmds old_postuninstall_cmds \ + compiler_CXX \ + CC_CXX \ + LD_CXX \ + lt_prog_compiler_wl_CXX \ + lt_prog_compiler_pic_CXX \ + lt_prog_compiler_static_CXX \ + lt_prog_compiler_no_builtin_flag_CXX \ + export_dynamic_flag_spec_CXX \ + thread_safe_flag_spec_CXX \ + whole_archive_flag_spec_CXX \ + enable_shared_with_static_runtimes_CXX \ + old_archive_cmds_CXX \ + old_archive_from_new_cmds_CXX \ + predep_objects_CXX \ + postdep_objects_CXX \ + predeps_CXX \ + postdeps_CXX \ + compiler_lib_search_path_CXX \ + archive_cmds_CXX \ + archive_expsym_cmds_CXX \ + postinstall_cmds_CXX \ + postuninstall_cmds_CXX \ + old_archive_from_expsyms_cmds_CXX \ + allow_undefined_flag_CXX \ + no_undefined_flag_CXX \ + export_symbols_cmds_CXX \ + hardcode_libdir_flag_spec_CXX \ + hardcode_libdir_flag_spec_ld_CXX \ + hardcode_libdir_separator_CXX \ + hardcode_automatic_CXX \ + module_cmds_CXX \ + module_expsym_cmds_CXX \ + lt_cv_prog_compiler_c_o_CXX \ + exclude_expsyms_CXX \ + include_expsyms_CXX; do + + case $var in + old_archive_cmds_CXX | \ + old_archive_from_new_cmds_CXX | \ + archive_cmds_CXX | \ + archive_expsym_cmds_CXX | \ + module_cmds_CXX | \ + module_expsym_cmds_CXX | \ + old_archive_from_expsyms_cmds_CXX | \ + export_symbols_cmds_CXX | \ + extract_expsyms_cmds | reload_cmds | finish_cmds | \ + postinstall_cmds | postuninstall_cmds | \ + old_postinstall_cmds | old_postuninstall_cmds | \ + sys_lib_search_path_spec | sys_lib_dlsearch_path_spec) + # Double-quote double-evaled strings. + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" + ;; + *) + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\"" + ;; + esac + done + + case $lt_echo in + *'\$0 --fallback-echo"') + lt_echo=`$echo "X$lt_echo" | $Xsed -e 's/\\\\\\\$0 --fallback-echo"$/$0 --fallback-echo"/'` + ;; + esac + +cfgfile="$ofile" + + cat <<__EOF__ >> "$cfgfile" +# ### BEGIN LIBTOOL TAG CONFIG: $tagname + +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc_CXX + +# Whether or not to disallow shared libs when runtime libs are static +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# An echo program that does not interpret backslashes. +echo=$lt_echo + +# The archiver. +AR=$lt_AR +AR_FLAGS=$lt_AR_FLAGS + +# A C compiler. +LTCC=$lt_LTCC + +# LTCC compiler flags. +LTCFLAGS=$lt_LTCFLAGS + +# A language-specific compiler. 
+CC=$lt_compiler_CXX + +# Is the compiler the GNU C compiler? +with_gcc=$GCC_CXX + +# An ERE matcher. +EGREP=$lt_EGREP + +# The linker used to build libraries. +LD=$lt_LD_CXX + +# Whether we need hard or soft links. +LN_S=$lt_LN_S + +# A BSD-compatible nm program. +NM=$lt_NM + +# A symbol stripping program +STRIP=$lt_STRIP + +# Used to examine libraries when file_magic_cmd begins "file" +MAGIC_CMD=$MAGIC_CMD + +# Used on cygwin: DLL creation program. +DLLTOOL="$DLLTOOL" + +# Used on cygwin: object dumper. +OBJDUMP="$OBJDUMP" + +# Used on cygwin: assembler. +AS="$AS" + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl_CXX + +# Object file suffix (normally "o"). +objext="$ac_objext" + +# Old archive suffix (normally "a"). +libext="$libext" + +# Shared library suffix (normally ".so"). +shrext_cmds='$shrext_cmds' + +# Executable file suffix (normally ""). +exeext="$exeext" + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic_CXX +pic_mode=$pic_mode + +# What is the maximum length of a command? +max_cmd_len=$lt_cv_sys_max_cmd_len + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Do we need the lib prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static_CXX + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX + +# Compiler flag to generate thread-safe objects. +thread_safe_flag_spec=$lt_thread_safe_flag_spec_CXX + +# Library versioning type. +version_type=$version_type + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME. +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Commands used to build and install an old-style archive. +RANLIB=$lt_RANLIB +old_archive_cmds=$lt_old_archive_cmds_CXX +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX + +# Commands used to build and install a shared archive. 
+archive_cmds=$lt_archive_cmds_CXX +archive_expsym_cmds=$lt_archive_expsym_cmds_CXX +postinstall_cmds=$lt_postinstall_cmds +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to build a loadable module (assumed same as above if empty) +module_cmds=$lt_module_cmds_CXX +module_expsym_cmds=$lt_module_expsym_cmds_CXX + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + +# Dependencies to place before the objects being linked to create a +# shared library. +predep_objects=$lt_predep_objects_CXX + +# Dependencies to place after the objects being linked to create a +# shared library. +postdep_objects=$lt_postdep_objects_CXX + +# Dependencies to place before the objects being linked to create a +# shared library. +predeps=$lt_predeps_CXX + +# Dependencies to place after the objects being linked to create a +# shared library. +postdeps=$lt_postdeps_CXX + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path_CXX + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method == file_magic. +file_magic_cmd=$lt_file_magic_cmd + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag_CXX + +# Flag that forces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag_CXX + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# Same as above, but a single script fragment to be evaled but not shown. +finish_eval=$lt_finish_eval + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm in a C name address pair +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# This is the shared library runtime path variable. +runpath_var=$runpath_var + +# This is the shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action_CXX + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist. +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX + +# If ld is used when linking, flag to hardcode \$libdir into +# a binary during linking. This must work even if \$libdir does +# not exist. +hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld_CXX + +# Whether we need a single -rpath flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX + +# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the +# resulting binary. +hardcode_direct=$hardcode_direct_CXX + +# Set to yes if using the -LDIR flag during linking hardcodes DIR into the +# resulting binary. +hardcode_minus_L=$hardcode_minus_L_CXX + +# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into +# the resulting binary. 
+hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX + +# Set to yes if building a shared library automatically hardcodes DIR into the library +# and all subsequent libraries and executables linked against it. +hardcode_automatic=$hardcode_automatic_CXX + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at relink time. +variables_saved_for_relink="$variables_saved_for_relink" + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs_CXX + +# Compile-time system search path for libraries +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Run-time system search path for libraries +sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + +# Fix the shell variable \$srcfile for the compiler. +fix_srcfile_path="$fix_srcfile_path_CXX" + +# Set to yes if exported symbols are required. +always_export_symbols=$always_export_symbols_CXX + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds_CXX + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms_CXX + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms_CXX + +# ### END LIBTOOL TAG CONFIG: $tagname + +__EOF__ + + +else + # If there is no Makefile yet, we rely on a make rule to execute + # `config.status --recheck' to rerun these tests and create the + # libtool script then. + ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` + if test -f "$ltmain_in"; then + test -f Makefile && make "$ltmain" + fi +fi + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +CC=$lt_save_CC +LDCXX=$LD +LD=$lt_save_LD +GCC=$lt_save_GCC +with_gnu_ldcxx=$with_gnu_ld +with_gnu_ld=$lt_save_with_gnu_ld +lt_cv_path_LDCXX=$lt_cv_path_LD +lt_cv_path_LD=$lt_save_path_LD +lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld +lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld + + else + tagname="" + fi + ;; + + F77) + if test -n "$F77" && test "X$F77" != "Xno"; then + +ac_ext=f +ac_compile='$F77 -c $FFLAGS conftest.$ac_ext >&5' +ac_link='$F77 -o conftest$ac_exeext $FFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_f77_compiler_gnu + + +archive_cmds_need_lc_F77=no +allow_undefined_flag_F77= +always_export_symbols_F77=no +archive_expsym_cmds_F77= +export_dynamic_flag_spec_F77= +hardcode_direct_F77=no +hardcode_libdir_flag_spec_F77= +hardcode_libdir_flag_spec_ld_F77= +hardcode_libdir_separator_F77= +hardcode_minus_L_F77=no +hardcode_automatic_F77=no +module_cmds_F77= +module_expsym_cmds_F77= +link_all_deplibs_F77=unknown +old_archive_cmds_F77=$old_archive_cmds +no_undefined_flag_F77= +whole_archive_flag_spec_F77= +enable_shared_with_static_runtimes_F77=no + +# Source file extension for f77 test sources. +ac_ext=f + +# Object file extension for compiled f77 test sources. +objext=o +objext_F77=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code=" subroutine t\n return\n end\n" + +# Code to be used in simple link tests +lt_simple_link_test_code=" program t\n end\n" + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. 
+LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +printf "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$rm conftest* + +ac_outfile=conftest.$ac_objext +printf "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$rm conftest* + + +# Allow CC to be a program name with arguments. +lt_save_CC="$CC" +CC=${F77-"f77"} +compiler=$CC +compiler_F77=$CC +for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + + +{ echo "$as_me:$LINENO: checking if libtool supports shared libraries" >&5 +echo $ECHO_N "checking if libtool supports shared libraries... $ECHO_C" >&6; } +{ echo "$as_me:$LINENO: result: $can_build_shared" >&5 +echo "${ECHO_T}$can_build_shared" >&6; } + +{ echo "$as_me:$LINENO: checking whether to build shared libraries" >&5 +echo $ECHO_N "checking whether to build shared libraries... $ECHO_C" >&6; } +test "$can_build_shared" = "no" && enable_shared=no + +# On AIX, shared libraries and static libraries use the same namespace, and +# are all built from PIC. +case $host_os in +aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; +aix4* | aix5*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; +esac +{ echo "$as_me:$LINENO: result: $enable_shared" >&5 +echo "${ECHO_T}$enable_shared" >&6; } + +{ echo "$as_me:$LINENO: checking whether to build static libraries" >&5 +echo $ECHO_N "checking whether to build static libraries... $ECHO_C" >&6; } +# Make sure either enable_shared or enable_static is yes. +test "$enable_shared" = yes || enable_static=yes +{ echo "$as_me:$LINENO: result: $enable_static" >&5 +echo "${ECHO_T}$enable_static" >&6; } + +GCC_F77="$G77" +LD_F77="$LD" + +lt_prog_compiler_wl_F77= +lt_prog_compiler_pic_F77= +lt_prog_compiler_static_F77= + +{ echo "$as_me:$LINENO: checking for $compiler option to produce PIC" >&5 +echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6; } + + if test "$GCC" = yes; then + lt_prog_compiler_wl_F77='-Wl,' + lt_prog_compiler_static_F77='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_F77='-Bstatic' + fi + ;; + + amigaos*) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + lt_prog_compiler_pic_F77='-m68020 -resident32 -malways-restore-a4' + ;; + + beos* | cygwin* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). 
+ lt_prog_compiler_pic_F77='-DDLL_EXPORT' + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic_F77='-fno-common' + ;; + + interix3*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + lt_prog_compiler_can_build_shared_F77=no + enable_shared=no + ;; + + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic_F77=-Kconform_pic + fi + ;; + + hpux*) + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic_F77='-fPIC' + ;; + esac + ;; + + *) + lt_prog_compiler_pic_F77='-fPIC' + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + lt_prog_compiler_wl_F77='-Wl,' + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_F77='-Bstatic' + else + lt_prog_compiler_static_F77='-bnso -bI:/lib/syscalls.exp' + fi + ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case $cc_basename in + xlc*) + lt_prog_compiler_pic_F77='-qnocommon' + lt_prog_compiler_wl_F77='-Wl,' + ;; + esac + ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic_F77='-DDLL_EXPORT' + ;; + + hpux9* | hpux10* | hpux11*) + lt_prog_compiler_wl_F77='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic_F77='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + lt_prog_compiler_static_F77='${wl}-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + lt_prog_compiler_wl_F77='-Wl,' + # PIC (with -KPIC) is the default. + lt_prog_compiler_static_F77='-non_shared' + ;; + + newsos6) + lt_prog_compiler_pic_F77='-KPIC' + lt_prog_compiler_static_F77='-Bstatic' + ;; + + linux*) + case $cc_basename in + icc* | ecc*) + lt_prog_compiler_wl_F77='-Wl,' + lt_prog_compiler_pic_F77='-KPIC' + lt_prog_compiler_static_F77='-static' + ;; + pgcc* | pgf77* | pgf90* | pgf95*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + lt_prog_compiler_wl_F77='-Wl,' + lt_prog_compiler_pic_F77='-fpic' + lt_prog_compiler_static_F77='-Bstatic' + ;; + ccc*) + lt_prog_compiler_wl_F77='-Wl,' + # All Alpha code is PIC. + lt_prog_compiler_static_F77='-non_shared' + ;; + esac + ;; + + osf3* | osf4* | osf5*) + lt_prog_compiler_wl_F77='-Wl,' + # All OSF/1 code is PIC. 
+ lt_prog_compiler_static_F77='-non_shared' + ;; + + solaris*) + lt_prog_compiler_pic_F77='-KPIC' + lt_prog_compiler_static_F77='-Bstatic' + case $cc_basename in + f77* | f90* | f95*) + lt_prog_compiler_wl_F77='-Qoption ld ';; + *) + lt_prog_compiler_wl_F77='-Wl,';; + esac + ;; + + sunos4*) + lt_prog_compiler_wl_F77='-Qoption ld ' + lt_prog_compiler_pic_F77='-PIC' + lt_prog_compiler_static_F77='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3*) + lt_prog_compiler_wl_F77='-Wl,' + lt_prog_compiler_pic_F77='-KPIC' + lt_prog_compiler_static_F77='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec ;then + lt_prog_compiler_pic_F77='-Kconform_pic' + lt_prog_compiler_static_F77='-Bstatic' + fi + ;; + + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + lt_prog_compiler_wl_F77='-Wl,' + lt_prog_compiler_pic_F77='-KPIC' + lt_prog_compiler_static_F77='-Bstatic' + ;; + + unicos*) + lt_prog_compiler_wl_F77='-Wl,' + lt_prog_compiler_can_build_shared_F77=no + ;; + + uts4*) + lt_prog_compiler_pic_F77='-pic' + lt_prog_compiler_static_F77='-Bstatic' + ;; + + *) + lt_prog_compiler_can_build_shared_F77=no + ;; + esac + fi + +{ echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_F77" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic_F77" >&6; } + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$lt_prog_compiler_pic_F77"; then + +{ echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic_F77 works" >&5 +echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic_F77 works... $ECHO_C" >&6; } +if test "${lt_prog_compiler_pic_works_F77+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_prog_compiler_pic_works_F77=no + ac_outfile=conftest.$ac_objext + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic_F77" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:13228: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:13232: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_prog_compiler_pic_works_F77=yes + fi + fi + $rm conftest* + +fi +{ echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_works_F77" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic_works_F77" >&6; } + +if test x"$lt_prog_compiler_pic_works_F77" = xyes; then + case $lt_prog_compiler_pic_F77 in + "" | " "*) ;; + *) lt_prog_compiler_pic_F77=" $lt_prog_compiler_pic_F77" ;; + esac +else + lt_prog_compiler_pic_F77= + lt_prog_compiler_can_build_shared_F77=no +fi + +fi +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic_F77= + ;; + *) + lt_prog_compiler_pic_F77="$lt_prog_compiler_pic_F77" + ;; +esac + +# +# Check to make sure the static flag actually works. +# +wl=$lt_prog_compiler_wl_F77 eval lt_tmp_static_flag=\"$lt_prog_compiler_static_F77\" +{ echo "$as_me:$LINENO: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +echo $ECHO_N "checking if $compiler static flag $lt_tmp_static_flag works... $ECHO_C" >&6; } +if test "${lt_prog_compiler_static_works_F77+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_prog_compiler_static_works_F77=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + printf "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&5 + $echo "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_prog_compiler_static_works_F77=yes + fi + else + lt_prog_compiler_static_works_F77=yes + fi + fi + $rm conftest* + LDFLAGS="$save_LDFLAGS" + +fi +{ echo "$as_me:$LINENO: result: $lt_prog_compiler_static_works_F77" >&5 +echo "${ECHO_T}$lt_prog_compiler_static_works_F77" >&6; } + +if test x"$lt_prog_compiler_static_works_F77" = xyes; then + : +else + lt_prog_compiler_static_F77= +fi + + +{ echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 +echo $ECHO_N "checking if $compiler supports -c -o file.$ac_objext... $ECHO_C" >&6; } +if test "${lt_cv_prog_compiler_c_o_F77+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_prog_compiler_c_o_F77=no + $rm -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:13332: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:13336: \$? 
= $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o_F77=yes + fi + fi + chmod u+w . 2>&5 + $rm conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files + $rm out/* && rmdir out + cd .. + rmdir conftest + $rm conftest* + +fi +{ echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o_F77" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_c_o_F77" >&6; } + + +hard_links="nottested" +if test "$lt_cv_prog_compiler_c_o_F77" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + { echo "$as_me:$LINENO: checking if we can lock with hard links" >&5 +echo $ECHO_N "checking if we can lock with hard links... $ECHO_C" >&6; } + hard_links=yes + $rm conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + { echo "$as_me:$LINENO: result: $hard_links" >&5 +echo "${ECHO_T}$hard_links" >&6; } + if test "$hard_links" = no; then + { echo "$as_me:$LINENO: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 +echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + +{ echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared libraries... $ECHO_C" >&6; } + + runpath_var= + allow_undefined_flag_F77= + enable_shared_with_static_runtimes_F77=no + archive_cmds_F77= + archive_expsym_cmds_F77= + old_archive_From_new_cmds_F77= + old_archive_from_expsyms_cmds_F77= + export_dynamic_flag_spec_F77= + whole_archive_flag_spec_F77= + thread_safe_flag_spec_F77= + hardcode_libdir_flag_spec_F77= + hardcode_libdir_flag_spec_ld_F77= + hardcode_libdir_separator_F77= + hardcode_direct_F77=no + hardcode_minus_L_F77=no + hardcode_shlibpath_var_F77=unsupported + link_all_deplibs_F77=unknown + hardcode_automatic_F77=no + module_cmds_F77= + module_expsym_cmds_F77= + always_export_symbols_F77=no + export_symbols_cmds_F77='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + include_expsyms_F77= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ` (' and `)$', so one must not match beginning or + # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', + # as well as any symbol that contains `d'. + exclude_expsyms_F77="_GLOBAL_OFFSET_TABLE_" + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + extract_expsyms_cmds= + # Just being paranoid about ensuring that cc_basename is set. 
+ for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + + case $host_os in + cygwin* | mingw* | pw32*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; + openbsd*) + with_gnu_ld=no + ;; + esac + + ld_shlibs_F77=yes + if test "$with_gnu_ld" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='${wl}' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec_F77='${wl}--rpath ${wl}$libdir' + export_dynamic_flag_spec_F77='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. + if $LD --help 2>&1 | grep 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec_F77="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + whole_archive_flag_spec_F77= + fi + supports_anon_versioning=no + case `$LD -v 2>/dev/null` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix3* | aix4* | aix5*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + ld_shlibs_F77=no + cat <&2 + +*** Warning: the GNU linker, at least up to release 2.9.1, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to modify your PATH +*** so that a non-GNU linker is found, and then restart. + +EOF + fi + ;; + + amigaos*) + archive_cmds_F77='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec_F77='-L$libdir' + hardcode_minus_L_F77=yes + + # Samuel A. Falvo II reports + # that the semantics of dynamic libraries on AmigaOS, at least up + # to version 4, is to share data among multiple programs linked + # with the same dynamic library. Since this doesn't match the + # behavior of shared libraries on other platforms, we can't use + # them. + ld_shlibs_F77=no + ;; + + beos*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag_F77=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. 
FIXME + archive_cmds_F77='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + ld_shlibs_F77=no + fi + ;; + + cygwin* | mingw* | pw32*) + # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, F77) is actually meaningless, + # as there is no search path for DLLs. + hardcode_libdir_flag_spec_F77='-L$libdir' + allow_undefined_flag_F77=unsupported + always_export_symbols_F77=no + enable_shared_with_static_runtimes_F77=yes + export_symbols_cmds_F77='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS] /s/.* \([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW] /s/.* //'\'' | sort | uniq > $export_symbols' + + if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then + archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... + archive_expsym_cmds_F77='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs_F77=no + fi + ;; + + interix3*) + hardcode_direct_F77=no + hardcode_shlibpath_var_F77=no + hardcode_libdir_flag_spec_F77='${wl}-rpath,$libdir' + export_dynamic_flag_spec_F77='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
+ archive_cmds_F77='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + archive_expsym_cmds_F77='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + + linux*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + tmp_addflag= + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + whole_archive_flag_spec_F77='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers + whole_archive_flag_spec_F77='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + esac + archive_cmds_F77='$CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + + if test $supports_anon_versioning = yes; then + archive_expsym_cmds_F77='$echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + $echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + else + ld_shlibs_F77=no + fi + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + archive_cmds_F77='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris*) + if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then + ld_shlibs_F77=no + cat <&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. 
+ +EOF + elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs_F77=no + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) + ld_shlibs_F77=no + cat <<_LT_EOF 1>&2 + +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not +*** reliably create shared libraries on SCO systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + ;; + *) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + hardcode_libdir_flag_spec_F77='`test -z "$SCOABSPATH" && echo ${wl}-rpath,$libdir`' + archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib' + archive_expsym_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname,-retain-symbols-file,$export_symbols -o $lib' + else + ld_shlibs_F77=no + fi + ;; + esac + ;; + + sunos4*) + archive_cmds_F77='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + hardcode_direct_F77=yes + hardcode_shlibpath_var_F77=no + ;; + + *) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs_F77=no + fi + ;; + esac + + if test "$ld_shlibs_F77" = no; then + runpath_var= + hardcode_libdir_flag_spec_F77= + export_dynamic_flag_spec_F77= + whole_archive_flag_spec_F77= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + allow_undefined_flag_F77=unsupported + always_export_symbols_F77=yes + archive_expsym_cmds_F77='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + hardcode_minus_L_F77=yes + if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + hardcode_direct_F77=unsupported + fi + ;; + + aix4* | aix5*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. 
+ # -C means demangle to AIX nm, but means don't demangle with GNU nm + if $NM -V 2>&1 | grep 'GNU' > /dev/null; then + export_symbols_cmds_F77='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds_F77='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[23]|aix4.[23].*|aix5*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + archive_cmds_F77='' + hardcode_direct_F77=yes + hardcode_libdir_separator_F77=':' + link_all_deplibs_F77=yes + + if test "$GCC" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && \ + strings "$collect2name" | grep resolve_lib_name >/dev/null + then + # We have reworked collect2 + hardcode_direct_F77=yes + else + # We have old collect2 + hardcode_direct_F77=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L_F77=yes + hardcode_libdir_flag_spec_F77='-L$libdir' + hardcode_libdir_separator_F77= + fi + ;; + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + always_export_symbols_F77=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag_F77='-berok' + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF + program main + + end +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_f77_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. +if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec_F77='${wl}-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds_F77="\$CC"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + hardcode_libdir_flag_spec_F77='${wl}-R $libdir:/usr/lib:/lib' + allow_undefined_flag_F77="-z nodefs" + archive_expsym_cmds_F77="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF + program main + + end +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_f77_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. +if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec_F77='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag_F77=' ${wl}-bernotok' + allow_undefined_flag_F77=' ${wl}-berok' + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec_F77='$convenience' + archive_cmds_need_lc_F77=yes + # This is similar to how AIX traditionally builds its shared libraries. 
+ archive_expsym_cmds_F77="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + amigaos*) + archive_cmds_F77='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec_F77='-L$libdir' + hardcode_minus_L_F77=yes + # see comment about different semantics on the GNU ld section + ld_shlibs_F77=no + ;; + + bsdi[45]*) + export_dynamic_flag_spec_F77=-rdynamic + ;; + + cygwin* | mingw* | pw32*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + hardcode_libdir_flag_spec_F77=' ' + allow_undefined_flag_F77=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + archive_cmds_F77='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | $SED -e '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + old_archive_From_new_cmds_F77='true' + # FIXME: Should let the user specify the lib program. + old_archive_cmds_F77='lib /OUT:$oldlib$oldobjs$old_deplibs' + fix_srcfile_path_F77='`cygpath -w "$srcfile"`' + enable_shared_with_static_runtimes_F77=yes + ;; + + darwin* | rhapsody*) + case $host_os in + rhapsody* | darwin1.[012]) + allow_undefined_flag_F77='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + allow_undefined_flag_F77='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[012]) + allow_undefined_flag_F77='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + allow_undefined_flag_F77='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; + esac + archive_cmds_need_lc_F77=no + hardcode_direct_F77=no + hardcode_automatic_F77=yes + hardcode_shlibpath_var_F77=unsupported + whole_archive_flag_spec_F77='' + link_all_deplibs_F77=yes + if test "$GCC" = yes ; then + output_verbose_link_cmd='echo' + archive_cmds_F77='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + module_cmds_F77='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + archive_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + case $cc_basename in 
+ xlc*) + output_verbose_link_cmd='echo' + archive_cmds_F77='$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + module_cmds_F77='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + archive_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + ld_shlibs_F77=no + ;; + esac + fi + ;; + + dgux*) + archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec_F77='-L$libdir' + hardcode_shlibpath_var_F77=no + ;; + + freebsd1*) + ld_shlibs_F77=no + ;; + + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). + freebsd2.2*) + archive_cmds_F77='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + hardcode_libdir_flag_spec_F77='-R$libdir' + hardcode_direct_F77=yes + hardcode_shlibpath_var_F77=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2*) + archive_cmds_F77='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_F77=yes + hardcode_minus_L_F77=yes + hardcode_shlibpath_var_F77=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | kfreebsd*-gnu | dragonfly*) + archive_cmds_F77='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec_F77='-R$libdir' + hardcode_direct_F77=yes + hardcode_shlibpath_var_F77=no + ;; + + hpux9*) + if test "$GCC" = yes; then + archive_cmds_F77='$rm $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + archive_cmds_F77='$rm $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi + hardcode_libdir_flag_spec_F77='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_F77=: + hardcode_direct_F77=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ hardcode_minus_L_F77=yes + export_dynamic_flag_spec_F77='${wl}-E' + ;; + + hpux10*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + archive_cmds_F77='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds_F77='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec_F77='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_F77=: + + hardcode_direct_F77=yes + export_dynamic_flag_spec_F77='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L_F77=yes + fi + ;; + + hpux11*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + case $host_cpu in + hppa*64*) + archive_cmds_F77='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds_F77='$CC -shared ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds_F77='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + archive_cmds_F77='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds_F77='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds_F77='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + fi + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec_F77='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_F77=: + + case $host_cpu in + hppa*64*|ia64*) + hardcode_libdir_flag_spec_ld_F77='+b $libdir' + hardcode_direct_F77=no + hardcode_shlibpath_var_F77=no + ;; + *) + hardcode_direct_F77=yes + export_dynamic_flag_spec_F77='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ hardcode_minus_L_F77=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then + archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + archive_cmds_F77='$LD -shared $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec_ld_F77='-rpath $libdir' + fi + hardcode_libdir_flag_spec_F77='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_F77=: + link_all_deplibs_F77=yes + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + archive_cmds_F77='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + archive_cmds_F77='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + hardcode_libdir_flag_spec_F77='-R$libdir' + hardcode_direct_F77=yes + hardcode_shlibpath_var_F77=no + ;; + + newsos6) + archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_F77=yes + hardcode_libdir_flag_spec_F77='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_F77=: + hardcode_shlibpath_var_F77=no + ;; + + openbsd*) + hardcode_direct_F77=yes + hardcode_shlibpath_var_F77=no + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + archive_cmds_F77='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_F77='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' + hardcode_libdir_flag_spec_F77='${wl}-rpath,$libdir' + export_dynamic_flag_spec_F77='${wl}-E' + else + case $host_os in + openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) + archive_cmds_F77='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec_F77='-R$libdir' + ;; + *) + archive_cmds_F77='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec_F77='${wl}-rpath,$libdir' + ;; + esac + fi + ;; + + os2*) + hardcode_libdir_flag_spec_F77='-L$libdir' + hardcode_minus_L_F77=yes + allow_undefined_flag_F77=unsupported + archive_cmds_F77='$echo "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$echo DATA >> $output_objdir/$libname.def~$echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~$echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' + old_archive_From_new_cmds_F77='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; + + osf3*) + if test "$GCC" = yes; then + allow_undefined_flag_F77=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_F77='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + allow_undefined_flag_F77=' -expect_unresolved \*' + archive_cmds_F77='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + fi + hardcode_libdir_flag_spec_F77='${wl}-rpath ${wl}$libdir' + 
hardcode_libdir_separator_F77=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + allow_undefined_flag_F77=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_F77='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec_F77='${wl}-rpath ${wl}$libdir' + else + allow_undefined_flag_F77=' -expect_unresolved \*' + archive_cmds_F77='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds_F77='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; echo "-hidden">> $lib.exp~ + $LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~$rm $lib.exp' + + # Both c and cxx compiler support -rpath directly + hardcode_libdir_flag_spec_F77='-rpath $libdir' + fi + hardcode_libdir_separator_F77=: + ;; + + solaris*) + no_undefined_flag_F77=' -z text' + if test "$GCC" = yes; then + wlarc='${wl}' + archive_cmds_F77='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_F77='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -shared ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$rm $lib.exp' + else + wlarc='' + archive_cmds_F77='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + archive_expsym_cmds_F77='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' + fi + hardcode_libdir_flag_spec_F77='-R$libdir' + hardcode_shlibpath_var_F77=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine linker options so we + # cannot just pass the convience library names through + # without $wl, iff we do not link with $LD. + # Luckily, gcc supports the same syntax we need for Sun Studio. + # Supported since Solaris 2.6 (maybe 2.5.1?) + case $wlarc in + '') + whole_archive_flag_spec_F77='-z allextract$convenience -z defaultextract' ;; + *) + whole_archive_flag_spec_F77='${wl}-z ${wl}allextract`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}-z ${wl}defaultextract' ;; + esac ;; + esac + link_all_deplibs_F77=yes + ;; + + sunos4*) + if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. 
+ archive_cmds_F77='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds_F77='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + hardcode_libdir_flag_spec_F77='-L$libdir' + hardcode_direct_F77=yes + hardcode_minus_L_F77=yes + hardcode_shlibpath_var_F77=no + ;; + + sysv4) + case $host_vendor in + sni) + archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_F77=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. + archive_cmds_F77='$LD -G -o $lib $libobjs $deplibs $linker_flags' + reload_cmds_F77='$CC -r -o $output$reload_objs' + hardcode_direct_F77=no + ;; + motorola) + archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_F77=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + hardcode_shlibpath_var_F77=no + ;; + + sysv4.3*) + archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var_F77=no + export_dynamic_flag_spec_F77='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var_F77=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + ld_shlibs_F77=yes + fi + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7*) + no_undefined_flag_F77='${wl}-z,text' + archive_cmds_need_lc_F77=no + hardcode_shlibpath_var_F77=no + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + archive_cmds_F77='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_F77='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds_F77='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_F77='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+ no_undefined_flag_F77='${wl}-z,text' + allow_undefined_flag_F77='${wl}-z,nodefs' + archive_cmds_need_lc_F77=no + hardcode_shlibpath_var_F77=no + hardcode_libdir_flag_spec_F77='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`' + hardcode_libdir_separator_F77=':' + link_all_deplibs_F77=yes + export_dynamic_flag_spec_F77='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + archive_cmds_F77='$CC -shared ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_F77='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds_F77='$CC -G ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_F77='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + uts4*) + archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec_F77='-L$libdir' + hardcode_shlibpath_var_F77=no + ;; + + *) + ld_shlibs_F77=no + ;; + esac + fi + +{ echo "$as_me:$LINENO: result: $ld_shlibs_F77" >&5 +echo "${ECHO_T}$ld_shlibs_F77" >&6; } +test "$ld_shlibs_F77" = no && can_build_shared=no + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc_F77" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc_F77=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $archive_cmds_F77 in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + { echo "$as_me:$LINENO: checking whether -lc should be explicitly linked in" >&5 +echo $ECHO_N "checking whether -lc should be explicitly linked in... $ECHO_C" >&6; } + $rm conftest* + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl_F77 + pic_flag=$lt_prog_compiler_pic_F77 + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag_F77 + allow_undefined_flag_F77= + if { (eval echo "$as_me:$LINENO: \"$archive_cmds_F77 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1\"") >&5 + (eval $archive_cmds_F77 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + then + archive_cmds_need_lc_F77=no + else + archive_cmds_need_lc_F77=yes + fi + allow_undefined_flag_F77=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $rm conftest* + { echo "$as_me:$LINENO: result: $archive_cmds_need_lc_F77" >&5 +echo "${ECHO_T}$archive_cmds_need_lc_F77" >&6; } + ;; + esac + fi + ;; +esac + +{ echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 +echo $ECHO_N "checking dynamic linker characteristics... 
$ECHO_C" >&6; } +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';' >/dev/null ; then + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix4* | aix5*) + version_type=linux + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. 
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$host_os in + yes,cygwin* | yes,mingw* | yes,pw32*) + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i;echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $rm \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" + ;; + mingw*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH printed by + # mingw gcc, but we are running on Cygwin. Gcc prints its search + # path with ; separators, and with drive letters. We can handle the + # drive letters (cygwin fileutils understands them), so leave them, + # especially as we might pass files found there to a mingw objdump, + # which wouldn't understand a cygwinified path. Ahh. 
+ sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac + ;; + + linux*) + if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + supports_anon_versioning=no + case `$LD -v 2>/dev/null` in + *\ 01.* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + if test $supports_anon_versioning = yes; then + archive_expsym_cmds='$echo "{ global:" > $output_objdir/$libname.ver~ +cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ +$echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + else + $archive_expsym_cmds="$archive_cmds" + fi + else + ld_shlibs=no + fi + ;; + + *) + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' + ;; + esac + dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + # Apple's gcc prints 'gcc -print-search-dirs' doesn't operate the same. + if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + else + sys_lib_search_path_spec='/lib /usr/lib /usr/local/lib' + fi + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd1*) + dynamic_linker=no + ;; + +kfreebsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. 
+ if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[123]*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + # Handle Gentoo/FreeBSD as it was Linux + case $host_vendor in + gentoo) + version_type=linux ;; + *) + version_type=freebsd-$objformat ;; + esac + + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + linux) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + need_lib_prefix=no + need_version=no + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ + freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + freebsd*) # from 4.6 on + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555. 
+ postinstall_cmds='chmod 555 $lib' + ;; + +interix3*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be Linux ELF. +linux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. 
+ dynamic_linker='GNU/Linux ld.so' + ;; + +knetbsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +nto-qnx*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +openbsd*) + version_type=sunos + sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no + # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
+ case $host_os in + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; + esac + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[89] | openbsd2.[89].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +solaris*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + export_dynamic_flag_spec='${wl}-Blargedynsym' + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=freebsd-elf + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + if test "$with_gnu_ld" = yes; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + shlibpath_overrides_runpath=no + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + shlibpath_overrides_runpath=yes + case $host_os in + 
sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +uts4*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +{ echo "$as_me:$LINENO: result: $dynamic_linker" >&5 +echo "${ECHO_T}$dynamic_linker" >&6; } +test "$dynamic_linker" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +{ echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 +echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6; } +hardcode_action_F77= +if test -n "$hardcode_libdir_flag_spec_F77" || \ + test -n "$runpath_var_F77" || \ + test "X$hardcode_automatic_F77" = "Xyes" ; then + + # We can hardcode non-existant directories. + if test "$hardcode_direct_F77" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, F77)" != no && + test "$hardcode_minus_L_F77" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action_F77=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action_F77=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + hardcode_action_F77=unsupported +fi +{ echo "$as_me:$LINENO: result: $hardcode_action_F77" >&5 +echo "${ECHO_T}$hardcode_action_F77" >&6; } + +if test "$hardcode_action_F77" = relink; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi + + +# The else clause should only fire when bootstrapping the +# libtool distribution, otherwise you forgot to ship ltmain.sh +# with your package, and you will get complaints that there are +# no rules to generate ltmain.sh. +if test -f "$ltmain"; then + # See if we are running on zsh, and set the options which allow our commands through + # without removal of \ escapes. + if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + # Now quote all the things that may contain metacharacters while being + # careful not to overquote the AC_SUBSTed values. We take copies of the + # variables and quote the copies for generation of the libtool script. 
+ for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC LTCFLAGS NM \ + SED SHELL STRIP \ + libname_spec library_names_spec soname_spec extract_expsyms_cmds \ + old_striplib striplib file_magic_cmd finish_cmds finish_eval \ + deplibs_check_method reload_flag reload_cmds need_locks \ + lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl \ + lt_cv_sys_global_symbol_to_c_name_address \ + sys_lib_search_path_spec sys_lib_dlsearch_path_spec \ + old_postinstall_cmds old_postuninstall_cmds \ + compiler_F77 \ + CC_F77 \ + LD_F77 \ + lt_prog_compiler_wl_F77 \ + lt_prog_compiler_pic_F77 \ + lt_prog_compiler_static_F77 \ + lt_prog_compiler_no_builtin_flag_F77 \ + export_dynamic_flag_spec_F77 \ + thread_safe_flag_spec_F77 \ + whole_archive_flag_spec_F77 \ + enable_shared_with_static_runtimes_F77 \ + old_archive_cmds_F77 \ + old_archive_from_new_cmds_F77 \ + predep_objects_F77 \ + postdep_objects_F77 \ + predeps_F77 \ + postdeps_F77 \ + compiler_lib_search_path_F77 \ + archive_cmds_F77 \ + archive_expsym_cmds_F77 \ + postinstall_cmds_F77 \ + postuninstall_cmds_F77 \ + old_archive_from_expsyms_cmds_F77 \ + allow_undefined_flag_F77 \ + no_undefined_flag_F77 \ + export_symbols_cmds_F77 \ + hardcode_libdir_flag_spec_F77 \ + hardcode_libdir_flag_spec_ld_F77 \ + hardcode_libdir_separator_F77 \ + hardcode_automatic_F77 \ + module_cmds_F77 \ + module_expsym_cmds_F77 \ + lt_cv_prog_compiler_c_o_F77 \ + exclude_expsyms_F77 \ + include_expsyms_F77; do + + case $var in + old_archive_cmds_F77 | \ + old_archive_from_new_cmds_F77 | \ + archive_cmds_F77 | \ + archive_expsym_cmds_F77 | \ + module_cmds_F77 | \ + module_expsym_cmds_F77 | \ + old_archive_from_expsyms_cmds_F77 | \ + export_symbols_cmds_F77 | \ + extract_expsyms_cmds | reload_cmds | finish_cmds | \ + postinstall_cmds | postuninstall_cmds | \ + old_postinstall_cmds | old_postuninstall_cmds | \ + sys_lib_search_path_spec | sys_lib_dlsearch_path_spec) + # Double-quote double-evaled strings. + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" + ;; + *) + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\"" + ;; + esac + done + + case $lt_echo in + *'\$0 --fallback-echo"') + lt_echo=`$echo "X$lt_echo" | $Xsed -e 's/\\\\\\\$0 --fallback-echo"$/$0 --fallback-echo"/'` + ;; + esac + +cfgfile="$ofile" + + cat <<__EOF__ >> "$cfgfile" +# ### BEGIN LIBTOOL TAG CONFIG: $tagname + +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc_F77 + +# Whether or not to disallow shared libs when runtime libs are static +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_F77 + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# An echo program that does not interpret backslashes. +echo=$lt_echo + +# The archiver. +AR=$lt_AR +AR_FLAGS=$lt_AR_FLAGS + +# A C compiler. +LTCC=$lt_LTCC + +# LTCC compiler flags. +LTCFLAGS=$lt_LTCFLAGS + +# A language-specific compiler. 
+CC=$lt_compiler_F77 + +# Is the compiler the GNU C compiler? +with_gcc=$GCC_F77 + +# An ERE matcher. +EGREP=$lt_EGREP + +# The linker used to build libraries. +LD=$lt_LD_F77 + +# Whether we need hard or soft links. +LN_S=$lt_LN_S + +# A BSD-compatible nm program. +NM=$lt_NM + +# A symbol stripping program +STRIP=$lt_STRIP + +# Used to examine libraries when file_magic_cmd begins "file" +MAGIC_CMD=$MAGIC_CMD + +# Used on cygwin: DLL creation program. +DLLTOOL="$DLLTOOL" + +# Used on cygwin: object dumper. +OBJDUMP="$OBJDUMP" + +# Used on cygwin: assembler. +AS="$AS" + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl_F77 + +# Object file suffix (normally "o"). +objext="$ac_objext" + +# Old archive suffix (normally "a"). +libext="$libext" + +# Shared library suffix (normally ".so"). +shrext_cmds='$shrext_cmds' + +# Executable file suffix (normally ""). +exeext="$exeext" + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic_F77 +pic_mode=$pic_mode + +# What is the maximum length of a command? +max_cmd_len=$lt_cv_sys_max_cmd_len + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o_F77 + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Do we need the lib prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static_F77 + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_F77 + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_F77 + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec_F77 + +# Compiler flag to generate thread-safe objects. +thread_safe_flag_spec=$lt_thread_safe_flag_spec_F77 + +# Library versioning type. +version_type=$version_type + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME. +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Commands used to build and install an old-style archive. +RANLIB=$lt_RANLIB +old_archive_cmds=$lt_old_archive_cmds_F77 +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_F77 + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_F77 + +# Commands used to build and install a shared archive. 
+archive_cmds=$lt_archive_cmds_F77 +archive_expsym_cmds=$lt_archive_expsym_cmds_F77 +postinstall_cmds=$lt_postinstall_cmds +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to build a loadable module (assumed same as above if empty) +module_cmds=$lt_module_cmds_F77 +module_expsym_cmds=$lt_module_expsym_cmds_F77 + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + +# Dependencies to place before the objects being linked to create a +# shared library. +predep_objects=$lt_predep_objects_F77 + +# Dependencies to place after the objects being linked to create a +# shared library. +postdep_objects=$lt_postdep_objects_F77 + +# Dependencies to place before the objects being linked to create a +# shared library. +predeps=$lt_predeps_F77 + +# Dependencies to place after the objects being linked to create a +# shared library. +postdeps=$lt_postdeps_F77 + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path_F77 + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method == file_magic. +file_magic_cmd=$lt_file_magic_cmd + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag_F77 + +# Flag that forces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag_F77 + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# Same as above, but a single script fragment to be evaled but not shown. +finish_eval=$lt_finish_eval + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm in a C name address pair +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# This is the shared library runtime path variable. +runpath_var=$runpath_var + +# This is the shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action_F77 + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist. +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_F77 + +# If ld is used when linking, flag to hardcode \$libdir into +# a binary during linking. This must work even if \$libdir does +# not exist. +hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld_F77 + +# Whether we need a single -rpath flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator_F77 + +# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the +# resulting binary. +hardcode_direct=$hardcode_direct_F77 + +# Set to yes if using the -LDIR flag during linking hardcodes DIR into the +# resulting binary. +hardcode_minus_L=$hardcode_minus_L_F77 + +# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into +# the resulting binary. 
+hardcode_shlibpath_var=$hardcode_shlibpath_var_F77 + +# Set to yes if building a shared library automatically hardcodes DIR into the library +# and all subsequent libraries and executables linked against it. +hardcode_automatic=$hardcode_automatic_F77 + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at relink time. +variables_saved_for_relink="$variables_saved_for_relink" + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs_F77 + +# Compile-time system search path for libraries +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Run-time system search path for libraries +sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + +# Fix the shell variable \$srcfile for the compiler. +fix_srcfile_path="$fix_srcfile_path_F77" + +# Set to yes if exported symbols are required. +always_export_symbols=$always_export_symbols_F77 + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds_F77 + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms_F77 + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms_F77 + +# ### END LIBTOOL TAG CONFIG: $tagname + +__EOF__ + + +else + # If there is no Makefile yet, we rely on a make rule to execute + # `config.status --recheck' to rerun these tests and create the + # libtool script then. + ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` + if test -f "$ltmain_in"; then + test -f Makefile && make "$ltmain" + fi +fi + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +CC="$lt_save_CC" + + else + tagname="" + fi + ;; + + GCJ) + if test -n "$GCJ" && test "X$GCJ" != "Xno"; then + + +# Source file extension for Java test sources. +ac_ext=java + +# Object file extension for compiled Java test sources. +objext=o +objext_GCJ=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="class foo {}\n" + +# Code to be used in simple link tests +lt_simple_link_test_code='public class conftest { public static void main(String[] argv) {}; }\n' + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +printf "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$rm conftest* + +ac_outfile=conftest.$ac_objext +printf "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$rm conftest* + + +# Allow CC to be a program name with arguments. 
+lt_save_CC="$CC" +CC=${GCJ-"gcj"} +compiler=$CC +compiler_GCJ=$CC +for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + + +# GCJ did not exist at the time GCC didn't implicitly link libc in. +archive_cmds_need_lc_GCJ=no + +old_archive_cmds_GCJ=$old_archive_cmds + + +lt_prog_compiler_no_builtin_flag_GCJ= + +if test "$GCC" = yes; then + lt_prog_compiler_no_builtin_flag_GCJ=' -fno-builtin' + + +{ echo "$as_me:$LINENO: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 +echo $ECHO_N "checking if $compiler supports -fno-rtti -fno-exceptions... $ECHO_C" >&6; } +if test "${lt_cv_prog_compiler_rtti_exceptions+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_prog_compiler_rtti_exceptions=no + ac_outfile=conftest.$ac_objext + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="-fno-rtti -fno-exceptions" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:15567: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:15571: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_rtti_exceptions=yes + fi + fi + $rm conftest* + +fi +{ echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_rtti_exceptions" >&6; } + +if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then + lt_prog_compiler_no_builtin_flag_GCJ="$lt_prog_compiler_no_builtin_flag_GCJ -fno-rtti -fno-exceptions" +else + : +fi + +fi + +lt_prog_compiler_wl_GCJ= +lt_prog_compiler_pic_GCJ= +lt_prog_compiler_static_GCJ= + +{ echo "$as_me:$LINENO: checking for $compiler option to produce PIC" >&5 +echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6; } + + if test "$GCC" = yes; then + lt_prog_compiler_wl_GCJ='-Wl,' + lt_prog_compiler_static_GCJ='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_GCJ='-Bstatic' + fi + ;; + + amigaos*) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + lt_prog_compiler_pic_GCJ='-m68020 -resident32 -malways-restore-a4' + ;; + + beos* | cygwin* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. 
+ ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic_GCJ='-DDLL_EXPORT' + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic_GCJ='-fno-common' + ;; + + interix3*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + lt_prog_compiler_can_build_shared_GCJ=no + enable_shared=no + ;; + + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic_GCJ=-Kconform_pic + fi + ;; + + hpux*) + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic_GCJ='-fPIC' + ;; + esac + ;; + + *) + lt_prog_compiler_pic_GCJ='-fPIC' + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + lt_prog_compiler_wl_GCJ='-Wl,' + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_GCJ='-Bstatic' + else + lt_prog_compiler_static_GCJ='-bnso -bI:/lib/syscalls.exp' + fi + ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case $cc_basename in + xlc*) + lt_prog_compiler_pic_GCJ='-qnocommon' + lt_prog_compiler_wl_GCJ='-Wl,' + ;; + esac + ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic_GCJ='-DDLL_EXPORT' + ;; + + hpux9* | hpux10* | hpux11*) + lt_prog_compiler_wl_GCJ='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic_GCJ='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + lt_prog_compiler_static_GCJ='${wl}-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + lt_prog_compiler_wl_GCJ='-Wl,' + # PIC (with -KPIC) is the default. + lt_prog_compiler_static_GCJ='-non_shared' + ;; + + newsos6) + lt_prog_compiler_pic_GCJ='-KPIC' + lt_prog_compiler_static_GCJ='-Bstatic' + ;; + + linux*) + case $cc_basename in + icc* | ecc*) + lt_prog_compiler_wl_GCJ='-Wl,' + lt_prog_compiler_pic_GCJ='-KPIC' + lt_prog_compiler_static_GCJ='-static' + ;; + pgcc* | pgf77* | pgf90* | pgf95*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + lt_prog_compiler_wl_GCJ='-Wl,' + lt_prog_compiler_pic_GCJ='-fpic' + lt_prog_compiler_static_GCJ='-Bstatic' + ;; + ccc*) + lt_prog_compiler_wl_GCJ='-Wl,' + # All Alpha code is PIC. + lt_prog_compiler_static_GCJ='-non_shared' + ;; + esac + ;; + + osf3* | osf4* | osf5*) + lt_prog_compiler_wl_GCJ='-Wl,' + # All OSF/1 code is PIC. 
+ lt_prog_compiler_static_GCJ='-non_shared' + ;; + + solaris*) + lt_prog_compiler_pic_GCJ='-KPIC' + lt_prog_compiler_static_GCJ='-Bstatic' + case $cc_basename in + f77* | f90* | f95*) + lt_prog_compiler_wl_GCJ='-Qoption ld ';; + *) + lt_prog_compiler_wl_GCJ='-Wl,';; + esac + ;; + + sunos4*) + lt_prog_compiler_wl_GCJ='-Qoption ld ' + lt_prog_compiler_pic_GCJ='-PIC' + lt_prog_compiler_static_GCJ='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3*) + lt_prog_compiler_wl_GCJ='-Wl,' + lt_prog_compiler_pic_GCJ='-KPIC' + lt_prog_compiler_static_GCJ='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec ;then + lt_prog_compiler_pic_GCJ='-Kconform_pic' + lt_prog_compiler_static_GCJ='-Bstatic' + fi + ;; + + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + lt_prog_compiler_wl_GCJ='-Wl,' + lt_prog_compiler_pic_GCJ='-KPIC' + lt_prog_compiler_static_GCJ='-Bstatic' + ;; + + unicos*) + lt_prog_compiler_wl_GCJ='-Wl,' + lt_prog_compiler_can_build_shared_GCJ=no + ;; + + uts4*) + lt_prog_compiler_pic_GCJ='-pic' + lt_prog_compiler_static_GCJ='-Bstatic' + ;; + + *) + lt_prog_compiler_can_build_shared_GCJ=no + ;; + esac + fi + +{ echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_GCJ" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic_GCJ" >&6; } + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$lt_prog_compiler_pic_GCJ"; then + +{ echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic_GCJ works" >&5 +echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic_GCJ works... $ECHO_C" >&6; } +if test "${lt_prog_compiler_pic_works_GCJ+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_prog_compiler_pic_works_GCJ=no + ac_outfile=conftest.$ac_objext + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic_GCJ" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:15835: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:15839: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_prog_compiler_pic_works_GCJ=yes + fi + fi + $rm conftest* + +fi +{ echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_works_GCJ" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic_works_GCJ" >&6; } + +if test x"$lt_prog_compiler_pic_works_GCJ" = xyes; then + case $lt_prog_compiler_pic_GCJ in + "" | " "*) ;; + *) lt_prog_compiler_pic_GCJ=" $lt_prog_compiler_pic_GCJ" ;; + esac +else + lt_prog_compiler_pic_GCJ= + lt_prog_compiler_can_build_shared_GCJ=no +fi + +fi +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic_GCJ= + ;; + *) + lt_prog_compiler_pic_GCJ="$lt_prog_compiler_pic_GCJ" + ;; +esac + +# +# Check to make sure the static flag actually works. +# +wl=$lt_prog_compiler_wl_GCJ eval lt_tmp_static_flag=\"$lt_prog_compiler_static_GCJ\" +{ echo "$as_me:$LINENO: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +echo $ECHO_N "checking if $compiler static flag $lt_tmp_static_flag works... $ECHO_C" >&6; } +if test "${lt_prog_compiler_static_works_GCJ+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_prog_compiler_static_works_GCJ=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + printf "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&5 + $echo "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_prog_compiler_static_works_GCJ=yes + fi + else + lt_prog_compiler_static_works_GCJ=yes + fi + fi + $rm conftest* + LDFLAGS="$save_LDFLAGS" + +fi +{ echo "$as_me:$LINENO: result: $lt_prog_compiler_static_works_GCJ" >&5 +echo "${ECHO_T}$lt_prog_compiler_static_works_GCJ" >&6; } + +if test x"$lt_prog_compiler_static_works_GCJ" = xyes; then + : +else + lt_prog_compiler_static_GCJ= +fi + + +{ echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 +echo $ECHO_N "checking if $compiler supports -c -o file.$ac_objext... $ECHO_C" >&6; } +if test "${lt_cv_prog_compiler_c_o_GCJ+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_prog_compiler_c_o_GCJ=no + $rm -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:15939: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:15943: \$? 
= $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o_GCJ=yes + fi + fi + chmod u+w . 2>&5 + $rm conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files + $rm out/* && rmdir out + cd .. + rmdir conftest + $rm conftest* + +fi +{ echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o_GCJ" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_c_o_GCJ" >&6; } + + +hard_links="nottested" +if test "$lt_cv_prog_compiler_c_o_GCJ" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + { echo "$as_me:$LINENO: checking if we can lock with hard links" >&5 +echo $ECHO_N "checking if we can lock with hard links... $ECHO_C" >&6; } + hard_links=yes + $rm conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + { echo "$as_me:$LINENO: result: $hard_links" >&5 +echo "${ECHO_T}$hard_links" >&6; } + if test "$hard_links" = no; then + { echo "$as_me:$LINENO: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 +echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + +{ echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared libraries... $ECHO_C" >&6; } + + runpath_var= + allow_undefined_flag_GCJ= + enable_shared_with_static_runtimes_GCJ=no + archive_cmds_GCJ= + archive_expsym_cmds_GCJ= + old_archive_From_new_cmds_GCJ= + old_archive_from_expsyms_cmds_GCJ= + export_dynamic_flag_spec_GCJ= + whole_archive_flag_spec_GCJ= + thread_safe_flag_spec_GCJ= + hardcode_libdir_flag_spec_GCJ= + hardcode_libdir_flag_spec_ld_GCJ= + hardcode_libdir_separator_GCJ= + hardcode_direct_GCJ=no + hardcode_minus_L_GCJ=no + hardcode_shlibpath_var_GCJ=unsupported + link_all_deplibs_GCJ=unknown + hardcode_automatic_GCJ=no + module_cmds_GCJ= + module_expsym_cmds_GCJ= + always_export_symbols_GCJ=no + export_symbols_cmds_GCJ='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + include_expsyms_GCJ= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ` (' and `)$', so one must not match beginning or + # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', + # as well as any symbol that contains `d'. + exclude_expsyms_GCJ="_GLOBAL_OFFSET_TABLE_" + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + extract_expsyms_cmds= + # Just being paranoid about ensuring that cc_basename is set. 
+ for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + + case $host_os in + cygwin* | mingw* | pw32*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; + openbsd*) + with_gnu_ld=no + ;; + esac + + ld_shlibs_GCJ=yes + if test "$with_gnu_ld" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='${wl}' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec_GCJ='${wl}--rpath ${wl}$libdir' + export_dynamic_flag_spec_GCJ='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. + if $LD --help 2>&1 | grep 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec_GCJ="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + whole_archive_flag_spec_GCJ= + fi + supports_anon_versioning=no + case `$LD -v 2>/dev/null` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix3* | aix4* | aix5*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + ld_shlibs_GCJ=no + cat <&2 + +*** Warning: the GNU linker, at least up to release 2.9.1, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to modify your PATH +*** so that a non-GNU linker is found, and then restart. + +EOF + fi + ;; + + amigaos*) + archive_cmds_GCJ='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec_GCJ='-L$libdir' + hardcode_minus_L_GCJ=yes + + # Samuel A. Falvo II reports + # that the semantics of dynamic libraries on AmigaOS, at least up + # to version 4, is to share data among multiple programs linked + # with the same dynamic library. Since this doesn't match the + # behavior of shared libraries on other platforms, we can't use + # them. + ld_shlibs_GCJ=no + ;; + + beos*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag_GCJ=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. 
FIXME + archive_cmds_GCJ='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + ld_shlibs_GCJ=no + fi + ;; + + cygwin* | mingw* | pw32*) + # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, GCJ) is actually meaningless, + # as there is no search path for DLLs. + hardcode_libdir_flag_spec_GCJ='-L$libdir' + allow_undefined_flag_GCJ=unsupported + always_export_symbols_GCJ=no + enable_shared_with_static_runtimes_GCJ=yes + export_symbols_cmds_GCJ='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS] /s/.* \([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW] /s/.* //'\'' | sort | uniq > $export_symbols' + + if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then + archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... + archive_expsym_cmds_GCJ='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs_GCJ=no + fi + ;; + + interix3*) + hardcode_direct_GCJ=no + hardcode_shlibpath_var_GCJ=no + hardcode_libdir_flag_spec_GCJ='${wl}-rpath,$libdir' + export_dynamic_flag_spec_GCJ='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
+ archive_cmds_GCJ='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + archive_expsym_cmds_GCJ='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + + linux*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + tmp_addflag= + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + whole_archive_flag_spec_GCJ='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers + whole_archive_flag_spec_GCJ='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + esac + archive_cmds_GCJ='$CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + + if test $supports_anon_versioning = yes; then + archive_expsym_cmds_GCJ='$echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + $echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + else + ld_shlibs_GCJ=no + fi + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + archive_cmds_GCJ='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris*) + if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then + ld_shlibs_GCJ=no + cat <&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. 
+ +EOF + elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs_GCJ=no + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) + ld_shlibs_GCJ=no + cat <<_LT_EOF 1>&2 + +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not +*** reliably create shared libraries on SCO systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + ;; + *) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + hardcode_libdir_flag_spec_GCJ='`test -z "$SCOABSPATH" && echo ${wl}-rpath,$libdir`' + archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib' + archive_expsym_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname,-retain-symbols-file,$export_symbols -o $lib' + else + ld_shlibs_GCJ=no + fi + ;; + esac + ;; + + sunos4*) + archive_cmds_GCJ='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + hardcode_direct_GCJ=yes + hardcode_shlibpath_var_GCJ=no + ;; + + *) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs_GCJ=no + fi + ;; + esac + + if test "$ld_shlibs_GCJ" = no; then + runpath_var= + hardcode_libdir_flag_spec_GCJ= + export_dynamic_flag_spec_GCJ= + whole_archive_flag_spec_GCJ= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + allow_undefined_flag_GCJ=unsupported + always_export_symbols_GCJ=yes + archive_expsym_cmds_GCJ='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + hardcode_minus_L_GCJ=yes + if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + hardcode_direct_GCJ=unsupported + fi + ;; + + aix4* | aix5*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. 
+ # -C means demangle to AIX nm, but means don't demangle with GNU nm + if $NM -V 2>&1 | grep 'GNU' > /dev/null; then + export_symbols_cmds_GCJ='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds_GCJ='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[23]|aix4.[23].*|aix5*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + archive_cmds_GCJ='' + hardcode_direct_GCJ=yes + hardcode_libdir_separator_GCJ=':' + link_all_deplibs_GCJ=yes + + if test "$GCC" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && \ + strings "$collect2name" | grep resolve_lib_name >/dev/null + then + # We have reworked collect2 + hardcode_direct_GCJ=yes + else + # We have old collect2 + hardcode_direct_GCJ=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L_GCJ=yes + hardcode_libdir_flag_spec_GCJ='-L$libdir' + hardcode_libdir_separator_GCJ= + fi + ;; + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + always_export_symbols_GCJ=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag_GCJ='-berok' + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. +if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec_GCJ='${wl}-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds_GCJ="\$CC"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + hardcode_libdir_flag_spec_GCJ='${wl}-R $libdir:/usr/lib:/lib' + allow_undefined_flag_GCJ="-z nodefs" + archive_expsym_cmds_GCJ="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. 
+if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec_GCJ='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag_GCJ=' ${wl}-bernotok' + allow_undefined_flag_GCJ=' ${wl}-berok' + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec_GCJ='$convenience' + archive_cmds_need_lc_GCJ=yes + # This is similar to how AIX traditionally builds its shared libraries. + archive_expsym_cmds_GCJ="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + amigaos*) + archive_cmds_GCJ='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec_GCJ='-L$libdir' + hardcode_minus_L_GCJ=yes + # see comment about different semantics on the GNU ld section + ld_shlibs_GCJ=no + ;; + + bsdi[45]*) + export_dynamic_flag_spec_GCJ=-rdynamic + ;; + + cygwin* | mingw* | pw32*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + hardcode_libdir_flag_spec_GCJ=' ' + allow_undefined_flag_GCJ=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + archive_cmds_GCJ='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | $SED -e '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + old_archive_From_new_cmds_GCJ='true' + # FIXME: Should let the user specify the lib program. 
+ old_archive_cmds_GCJ='lib /OUT:$oldlib$oldobjs$old_deplibs' + fix_srcfile_path_GCJ='`cygpath -w "$srcfile"`' + enable_shared_with_static_runtimes_GCJ=yes + ;; + + darwin* | rhapsody*) + case $host_os in + rhapsody* | darwin1.[012]) + allow_undefined_flag_GCJ='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + allow_undefined_flag_GCJ='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[012]) + allow_undefined_flag_GCJ='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + allow_undefined_flag_GCJ='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; + esac + archive_cmds_need_lc_GCJ=no + hardcode_direct_GCJ=no + hardcode_automatic_GCJ=yes + hardcode_shlibpath_var_GCJ=unsupported + whole_archive_flag_spec_GCJ='' + link_all_deplibs_GCJ=yes + if test "$GCC" = yes ; then + output_verbose_link_cmd='echo' + archive_cmds_GCJ='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + module_cmds_GCJ='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + archive_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + case $cc_basename in + xlc*) + output_verbose_link_cmd='echo' + archive_cmds_GCJ='$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + module_cmds_GCJ='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + archive_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + ld_shlibs_GCJ=no + ;; + esac + fi + ;; + + dgux*) + archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec_GCJ='-L$libdir' + hardcode_shlibpath_var_GCJ=no + ;; + + freebsd1*) + ld_shlibs_GCJ=no + ;; + + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). 
+ freebsd2.2*) + archive_cmds_GCJ='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + hardcode_libdir_flag_spec_GCJ='-R$libdir' + hardcode_direct_GCJ=yes + hardcode_shlibpath_var_GCJ=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2*) + archive_cmds_GCJ='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_GCJ=yes + hardcode_minus_L_GCJ=yes + hardcode_shlibpath_var_GCJ=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | kfreebsd*-gnu | dragonfly*) + archive_cmds_GCJ='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec_GCJ='-R$libdir' + hardcode_direct_GCJ=yes + hardcode_shlibpath_var_GCJ=no + ;; + + hpux9*) + if test "$GCC" = yes; then + archive_cmds_GCJ='$rm $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + archive_cmds_GCJ='$rm $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi + hardcode_libdir_flag_spec_GCJ='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_GCJ=: + hardcode_direct_GCJ=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L_GCJ=yes + export_dynamic_flag_spec_GCJ='${wl}-E' + ;; + + hpux10*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + archive_cmds_GCJ='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds_GCJ='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec_GCJ='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_GCJ=: + + hardcode_direct_GCJ=yes + export_dynamic_flag_spec_GCJ='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L_GCJ=yes + fi + ;; + + hpux11*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + case $host_cpu in + hppa*64*) + archive_cmds_GCJ='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds_GCJ='$CC -shared ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds_GCJ='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + archive_cmds_GCJ='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds_GCJ='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds_GCJ='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + fi + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec_GCJ='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_GCJ=: + + case $host_cpu in + hppa*64*|ia64*) + hardcode_libdir_flag_spec_ld_GCJ='+b $libdir' + hardcode_direct_GCJ=no + hardcode_shlibpath_var_GCJ=no + ;; + *) + hardcode_direct_GCJ=yes + export_dynamic_flag_spec_GCJ='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ hardcode_minus_L_GCJ=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then + archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + archive_cmds_GCJ='$LD -shared $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec_ld_GCJ='-rpath $libdir' + fi + hardcode_libdir_flag_spec_GCJ='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_GCJ=: + link_all_deplibs_GCJ=yes + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + archive_cmds_GCJ='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + archive_cmds_GCJ='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + hardcode_libdir_flag_spec_GCJ='-R$libdir' + hardcode_direct_GCJ=yes + hardcode_shlibpath_var_GCJ=no + ;; + + newsos6) + archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_GCJ=yes + hardcode_libdir_flag_spec_GCJ='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_GCJ=: + hardcode_shlibpath_var_GCJ=no + ;; + + openbsd*) + hardcode_direct_GCJ=yes + hardcode_shlibpath_var_GCJ=no + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + archive_cmds_GCJ='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_GCJ='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' + hardcode_libdir_flag_spec_GCJ='${wl}-rpath,$libdir' + export_dynamic_flag_spec_GCJ='${wl}-E' + else + case $host_os in + openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) + archive_cmds_GCJ='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec_GCJ='-R$libdir' + ;; + *) + archive_cmds_GCJ='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec_GCJ='${wl}-rpath,$libdir' + ;; + esac + fi + ;; + + os2*) + hardcode_libdir_flag_spec_GCJ='-L$libdir' + hardcode_minus_L_GCJ=yes + allow_undefined_flag_GCJ=unsupported + archive_cmds_GCJ='$echo "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$echo DATA >> $output_objdir/$libname.def~$echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~$echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' + old_archive_From_new_cmds_GCJ='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; + + osf3*) + if test "$GCC" = yes; then + allow_undefined_flag_GCJ=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_GCJ='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + allow_undefined_flag_GCJ=' -expect_unresolved \*' + archive_cmds_GCJ='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + fi + hardcode_libdir_flag_spec_GCJ='${wl}-rpath ${wl}$libdir' + 
hardcode_libdir_separator_GCJ=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + allow_undefined_flag_GCJ=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_GCJ='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec_GCJ='${wl}-rpath ${wl}$libdir' + else + allow_undefined_flag_GCJ=' -expect_unresolved \*' + archive_cmds_GCJ='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds_GCJ='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; echo "-hidden">> $lib.exp~ + $LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~$rm $lib.exp' + + # Both c and cxx compiler support -rpath directly + hardcode_libdir_flag_spec_GCJ='-rpath $libdir' + fi + hardcode_libdir_separator_GCJ=: + ;; + + solaris*) + no_undefined_flag_GCJ=' -z text' + if test "$GCC" = yes; then + wlarc='${wl}' + archive_cmds_GCJ='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_GCJ='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -shared ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$rm $lib.exp' + else + wlarc='' + archive_cmds_GCJ='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + archive_expsym_cmds_GCJ='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' + fi + hardcode_libdir_flag_spec_GCJ='-R$libdir' + hardcode_shlibpath_var_GCJ=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine linker options so we + # cannot just pass the convience library names through + # without $wl, iff we do not link with $LD. + # Luckily, gcc supports the same syntax we need for Sun Studio. + # Supported since Solaris 2.6 (maybe 2.5.1?) + case $wlarc in + '') + whole_archive_flag_spec_GCJ='-z allextract$convenience -z defaultextract' ;; + *) + whole_archive_flag_spec_GCJ='${wl}-z ${wl}allextract`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}-z ${wl}defaultextract' ;; + esac ;; + esac + link_all_deplibs_GCJ=yes + ;; + + sunos4*) + if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. 
+ archive_cmds_GCJ='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds_GCJ='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + hardcode_libdir_flag_spec_GCJ='-L$libdir' + hardcode_direct_GCJ=yes + hardcode_minus_L_GCJ=yes + hardcode_shlibpath_var_GCJ=no + ;; + + sysv4) + case $host_vendor in + sni) + archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_GCJ=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. + archive_cmds_GCJ='$LD -G -o $lib $libobjs $deplibs $linker_flags' + reload_cmds_GCJ='$CC -r -o $output$reload_objs' + hardcode_direct_GCJ=no + ;; + motorola) + archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_GCJ=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + hardcode_shlibpath_var_GCJ=no + ;; + + sysv4.3*) + archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var_GCJ=no + export_dynamic_flag_spec_GCJ='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var_GCJ=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + ld_shlibs_GCJ=yes + fi + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7*) + no_undefined_flag_GCJ='${wl}-z,text' + archive_cmds_need_lc_GCJ=no + hardcode_shlibpath_var_GCJ=no + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + archive_cmds_GCJ='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_GCJ='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds_GCJ='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_GCJ='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+ no_undefined_flag_GCJ='${wl}-z,text' + allow_undefined_flag_GCJ='${wl}-z,nodefs' + archive_cmds_need_lc_GCJ=no + hardcode_shlibpath_var_GCJ=no + hardcode_libdir_flag_spec_GCJ='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`' + hardcode_libdir_separator_GCJ=':' + link_all_deplibs_GCJ=yes + export_dynamic_flag_spec_GCJ='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + archive_cmds_GCJ='$CC -shared ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_GCJ='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds_GCJ='$CC -G ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_GCJ='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + uts4*) + archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec_GCJ='-L$libdir' + hardcode_shlibpath_var_GCJ=no + ;; + + *) + ld_shlibs_GCJ=no + ;; + esac + fi + +{ echo "$as_me:$LINENO: result: $ld_shlibs_GCJ" >&5 +echo "${ECHO_T}$ld_shlibs_GCJ" >&6; } +test "$ld_shlibs_GCJ" = no && can_build_shared=no + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc_GCJ" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc_GCJ=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $archive_cmds_GCJ in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + { echo "$as_me:$LINENO: checking whether -lc should be explicitly linked in" >&5 +echo $ECHO_N "checking whether -lc should be explicitly linked in... $ECHO_C" >&6; } + $rm conftest* + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl_GCJ + pic_flag=$lt_prog_compiler_pic_GCJ + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag_GCJ + allow_undefined_flag_GCJ= + if { (eval echo "$as_me:$LINENO: \"$archive_cmds_GCJ 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1\"") >&5 + (eval $archive_cmds_GCJ 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + then + archive_cmds_need_lc_GCJ=no + else + archive_cmds_need_lc_GCJ=yes + fi + allow_undefined_flag_GCJ=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $rm conftest* + { echo "$as_me:$LINENO: result: $archive_cmds_need_lc_GCJ" >&5 +echo "${ECHO_T}$archive_cmds_need_lc_GCJ" >&6; } + ;; + esac + fi + ;; +esac + +{ echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 +echo $ECHO_N "checking dynamic linker characteristics... 
$ECHO_C" >&6; } +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';' >/dev/null ; then + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix4* | aix5*) + version_type=linux + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. 
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$host_os in + yes,cygwin* | yes,mingw* | yes,pw32*) + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i;echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $rm \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" + ;; + mingw*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH printed by + # mingw gcc, but we are running on Cygwin. Gcc prints its search + # path with ; separators, and with drive letters. We can handle the + # drive letters (cygwin fileutils understands them), so leave them, + # especially as we might pass files found there to a mingw objdump, + # which wouldn't understand a cygwinified path. Ahh. 
+ sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac + ;; + + linux*) + if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + supports_anon_versioning=no + case `$LD -v 2>/dev/null` in + *\ 01.* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + if test $supports_anon_versioning = yes; then + archive_expsym_cmds='$echo "{ global:" > $output_objdir/$libname.ver~ +cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ +$echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + else + $archive_expsym_cmds="$archive_cmds" + fi + else + ld_shlibs=no + fi + ;; + + *) + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' + ;; + esac + dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + # Apple's gcc prints 'gcc -print-search-dirs' doesn't operate the same. + if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + else + sys_lib_search_path_spec='/lib /usr/lib /usr/local/lib' + fi + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd1*) + dynamic_linker=no + ;; + +kfreebsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. 
+ if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[123]*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + # Handle Gentoo/FreeBSD as it was Linux + case $host_vendor in + gentoo) + version_type=linux ;; + *) + version_type=freebsd-$objformat ;; + esac + + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + linux) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + need_lib_prefix=no + need_version=no + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ + freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + freebsd*) # from 4.6 on + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555. 
+ postinstall_cmds='chmod 555 $lib' + ;; + +interix3*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be Linux ELF. +linux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. 
+ dynamic_linker='GNU/Linux ld.so' + ;; + +knetbsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +nto-qnx*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +openbsd*) + version_type=sunos + sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no + # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
+ case $host_os in + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; + esac + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[89] | openbsd2.[89].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +solaris*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + export_dynamic_flag_spec='${wl}-Blargedynsym' + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=freebsd-elf + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + if test "$with_gnu_ld" = yes; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + shlibpath_overrides_runpath=no + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + shlibpath_overrides_runpath=yes + case $host_os in + 
sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +uts4*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +{ echo "$as_me:$LINENO: result: $dynamic_linker" >&5 +echo "${ECHO_T}$dynamic_linker" >&6; } +test "$dynamic_linker" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +{ echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 +echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6; } +hardcode_action_GCJ= +if test -n "$hardcode_libdir_flag_spec_GCJ" || \ + test -n "$runpath_var_GCJ" || \ + test "X$hardcode_automatic_GCJ" = "Xyes" ; then + + # We can hardcode non-existant directories. + if test "$hardcode_direct_GCJ" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, GCJ)" != no && + test "$hardcode_minus_L_GCJ" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action_GCJ=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action_GCJ=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + hardcode_action_GCJ=unsupported +fi +{ echo "$as_me:$LINENO: result: $hardcode_action_GCJ" >&5 +echo "${ECHO_T}$hardcode_action_GCJ" >&6; } + +if test "$hardcode_action_GCJ" = relink; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi + + +# The else clause should only fire when bootstrapping the +# libtool distribution, otherwise you forgot to ship ltmain.sh +# with your package, and you will get complaints that there are +# no rules to generate ltmain.sh. +if test -f "$ltmain"; then + # See if we are running on zsh, and set the options which allow our commands through + # without removal of \ escapes. + if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + # Now quote all the things that may contain metacharacters while being + # careful not to overquote the AC_SUBSTed values. We take copies of the + # variables and quote the copies for generation of the libtool script. 
+ for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC LTCFLAGS NM \ + SED SHELL STRIP \ + libname_spec library_names_spec soname_spec extract_expsyms_cmds \ + old_striplib striplib file_magic_cmd finish_cmds finish_eval \ + deplibs_check_method reload_flag reload_cmds need_locks \ + lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl \ + lt_cv_sys_global_symbol_to_c_name_address \ + sys_lib_search_path_spec sys_lib_dlsearch_path_spec \ + old_postinstall_cmds old_postuninstall_cmds \ + compiler_GCJ \ + CC_GCJ \ + LD_GCJ \ + lt_prog_compiler_wl_GCJ \ + lt_prog_compiler_pic_GCJ \ + lt_prog_compiler_static_GCJ \ + lt_prog_compiler_no_builtin_flag_GCJ \ + export_dynamic_flag_spec_GCJ \ + thread_safe_flag_spec_GCJ \ + whole_archive_flag_spec_GCJ \ + enable_shared_with_static_runtimes_GCJ \ + old_archive_cmds_GCJ \ + old_archive_from_new_cmds_GCJ \ + predep_objects_GCJ \ + postdep_objects_GCJ \ + predeps_GCJ \ + postdeps_GCJ \ + compiler_lib_search_path_GCJ \ + archive_cmds_GCJ \ + archive_expsym_cmds_GCJ \ + postinstall_cmds_GCJ \ + postuninstall_cmds_GCJ \ + old_archive_from_expsyms_cmds_GCJ \ + allow_undefined_flag_GCJ \ + no_undefined_flag_GCJ \ + export_symbols_cmds_GCJ \ + hardcode_libdir_flag_spec_GCJ \ + hardcode_libdir_flag_spec_ld_GCJ \ + hardcode_libdir_separator_GCJ \ + hardcode_automatic_GCJ \ + module_cmds_GCJ \ + module_expsym_cmds_GCJ \ + lt_cv_prog_compiler_c_o_GCJ \ + exclude_expsyms_GCJ \ + include_expsyms_GCJ; do + + case $var in + old_archive_cmds_GCJ | \ + old_archive_from_new_cmds_GCJ | \ + archive_cmds_GCJ | \ + archive_expsym_cmds_GCJ | \ + module_cmds_GCJ | \ + module_expsym_cmds_GCJ | \ + old_archive_from_expsyms_cmds_GCJ | \ + export_symbols_cmds_GCJ | \ + extract_expsyms_cmds | reload_cmds | finish_cmds | \ + postinstall_cmds | postuninstall_cmds | \ + old_postinstall_cmds | old_postuninstall_cmds | \ + sys_lib_search_path_spec | sys_lib_dlsearch_path_spec) + # Double-quote double-evaled strings. + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" + ;; + *) + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\"" + ;; + esac + done + + case $lt_echo in + *'\$0 --fallback-echo"') + lt_echo=`$echo "X$lt_echo" | $Xsed -e 's/\\\\\\\$0 --fallback-echo"$/$0 --fallback-echo"/'` + ;; + esac + +cfgfile="$ofile" + + cat <<__EOF__ >> "$cfgfile" +# ### BEGIN LIBTOOL TAG CONFIG: $tagname + +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc_GCJ + +# Whether or not to disallow shared libs when runtime libs are static +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_GCJ + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# An echo program that does not interpret backslashes. +echo=$lt_echo + +# The archiver. +AR=$lt_AR +AR_FLAGS=$lt_AR_FLAGS + +# A C compiler. +LTCC=$lt_LTCC + +# LTCC compiler flags. +LTCFLAGS=$lt_LTCFLAGS + +# A language-specific compiler. 
+CC=$lt_compiler_GCJ + +# Is the compiler the GNU C compiler? +with_gcc=$GCC_GCJ + +# An ERE matcher. +EGREP=$lt_EGREP + +# The linker used to build libraries. +LD=$lt_LD_GCJ + +# Whether we need hard or soft links. +LN_S=$lt_LN_S + +# A BSD-compatible nm program. +NM=$lt_NM + +# A symbol stripping program +STRIP=$lt_STRIP + +# Used to examine libraries when file_magic_cmd begins "file" +MAGIC_CMD=$MAGIC_CMD + +# Used on cygwin: DLL creation program. +DLLTOOL="$DLLTOOL" + +# Used on cygwin: object dumper. +OBJDUMP="$OBJDUMP" + +# Used on cygwin: assembler. +AS="$AS" + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl_GCJ + +# Object file suffix (normally "o"). +objext="$ac_objext" + +# Old archive suffix (normally "a"). +libext="$libext" + +# Shared library suffix (normally ".so"). +shrext_cmds='$shrext_cmds' + +# Executable file suffix (normally ""). +exeext="$exeext" + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic_GCJ +pic_mode=$pic_mode + +# What is the maximum length of a command? +max_cmd_len=$lt_cv_sys_max_cmd_len + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o_GCJ + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Do we need the lib prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static_GCJ + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_GCJ + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_GCJ + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec_GCJ + +# Compiler flag to generate thread-safe objects. +thread_safe_flag_spec=$lt_thread_safe_flag_spec_GCJ + +# Library versioning type. +version_type=$version_type + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME. +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Commands used to build and install an old-style archive. +RANLIB=$lt_RANLIB +old_archive_cmds=$lt_old_archive_cmds_GCJ +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_GCJ + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_GCJ + +# Commands used to build and install a shared archive. 
+archive_cmds=$lt_archive_cmds_GCJ +archive_expsym_cmds=$lt_archive_expsym_cmds_GCJ +postinstall_cmds=$lt_postinstall_cmds +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to build a loadable module (assumed same as above if empty) +module_cmds=$lt_module_cmds_GCJ +module_expsym_cmds=$lt_module_expsym_cmds_GCJ + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + +# Dependencies to place before the objects being linked to create a +# shared library. +predep_objects=$lt_predep_objects_GCJ + +# Dependencies to place after the objects being linked to create a +# shared library. +postdep_objects=$lt_postdep_objects_GCJ + +# Dependencies to place before the objects being linked to create a +# shared library. +predeps=$lt_predeps_GCJ + +# Dependencies to place after the objects being linked to create a +# shared library. +postdeps=$lt_postdeps_GCJ + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path_GCJ + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method == file_magic. +file_magic_cmd=$lt_file_magic_cmd + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag_GCJ + +# Flag that forces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag_GCJ + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# Same as above, but a single script fragment to be evaled but not shown. +finish_eval=$lt_finish_eval + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm in a C name address pair +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# This is the shared library runtime path variable. +runpath_var=$runpath_var + +# This is the shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action_GCJ + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist. +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_GCJ + +# If ld is used when linking, flag to hardcode \$libdir into +# a binary during linking. This must work even if \$libdir does +# not exist. +hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld_GCJ + +# Whether we need a single -rpath flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator_GCJ + +# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the +# resulting binary. +hardcode_direct=$hardcode_direct_GCJ + +# Set to yes if using the -LDIR flag during linking hardcodes DIR into the +# resulting binary. +hardcode_minus_L=$hardcode_minus_L_GCJ + +# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into +# the resulting binary. 
+hardcode_shlibpath_var=$hardcode_shlibpath_var_GCJ + +# Set to yes if building a shared library automatically hardcodes DIR into the library +# and all subsequent libraries and executables linked against it. +hardcode_automatic=$hardcode_automatic_GCJ + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at relink time. +variables_saved_for_relink="$variables_saved_for_relink" + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs_GCJ + +# Compile-time system search path for libraries +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Run-time system search path for libraries +sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + +# Fix the shell variable \$srcfile for the compiler. +fix_srcfile_path="$fix_srcfile_path_GCJ" + +# Set to yes if exported symbols are required. +always_export_symbols=$always_export_symbols_GCJ + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds_GCJ + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms_GCJ + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms_GCJ + +# ### END LIBTOOL TAG CONFIG: $tagname + +__EOF__ + + +else + # If there is no Makefile yet, we rely on a make rule to execute + # `config.status --recheck' to rerun these tests and create the + # libtool script then. + ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` + if test -f "$ltmain_in"; then + test -f Makefile && make "$ltmain" + fi +fi + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +CC="$lt_save_CC" + + else + tagname="" + fi + ;; + + RC) + + +# Source file extension for RC test sources. +ac_ext=rc + +# Object file extension for compiled RC test sources. +objext=o +objext_RC=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }\n' + +# Code to be used in simple link tests +lt_simple_link_test_code="$lt_simple_compile_test_code" + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +printf "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$rm conftest* + +ac_outfile=conftest.$ac_objext +printf "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$rm conftest* + + +# Allow CC to be a program name with arguments. 
+lt_save_CC="$CC" +CC=${RC-"windres"} +compiler=$CC +compiler_RC=$CC +for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + +lt_cv_prog_compiler_c_o_RC=yes + +# The else clause should only fire when bootstrapping the +# libtool distribution, otherwise you forgot to ship ltmain.sh +# with your package, and you will get complaints that there are +# no rules to generate ltmain.sh. +if test -f "$ltmain"; then + # See if we are running on zsh, and set the options which allow our commands through + # without removal of \ escapes. + if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + # Now quote all the things that may contain metacharacters while being + # careful not to overquote the AC_SUBSTed values. We take copies of the + # variables and quote the copies for generation of the libtool script. + for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC LTCFLAGS NM \ + SED SHELL STRIP \ + libname_spec library_names_spec soname_spec extract_expsyms_cmds \ + old_striplib striplib file_magic_cmd finish_cmds finish_eval \ + deplibs_check_method reload_flag reload_cmds need_locks \ + lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl \ + lt_cv_sys_global_symbol_to_c_name_address \ + sys_lib_search_path_spec sys_lib_dlsearch_path_spec \ + old_postinstall_cmds old_postuninstall_cmds \ + compiler_RC \ + CC_RC \ + LD_RC \ + lt_prog_compiler_wl_RC \ + lt_prog_compiler_pic_RC \ + lt_prog_compiler_static_RC \ + lt_prog_compiler_no_builtin_flag_RC \ + export_dynamic_flag_spec_RC \ + thread_safe_flag_spec_RC \ + whole_archive_flag_spec_RC \ + enable_shared_with_static_runtimes_RC \ + old_archive_cmds_RC \ + old_archive_from_new_cmds_RC \ + predep_objects_RC \ + postdep_objects_RC \ + predeps_RC \ + postdeps_RC \ + compiler_lib_search_path_RC \ + archive_cmds_RC \ + archive_expsym_cmds_RC \ + postinstall_cmds_RC \ + postuninstall_cmds_RC \ + old_archive_from_expsyms_cmds_RC \ + allow_undefined_flag_RC \ + no_undefined_flag_RC \ + export_symbols_cmds_RC \ + hardcode_libdir_flag_spec_RC \ + hardcode_libdir_flag_spec_ld_RC \ + hardcode_libdir_separator_RC \ + hardcode_automatic_RC \ + module_cmds_RC \ + module_expsym_cmds_RC \ + lt_cv_prog_compiler_c_o_RC \ + exclude_expsyms_RC \ + include_expsyms_RC; do + + case $var in + old_archive_cmds_RC | \ + old_archive_from_new_cmds_RC | \ + archive_cmds_RC | \ + archive_expsym_cmds_RC | \ + module_cmds_RC | \ + module_expsym_cmds_RC | \ + old_archive_from_expsyms_cmds_RC | \ + export_symbols_cmds_RC | \ + extract_expsyms_cmds | reload_cmds | finish_cmds | \ + postinstall_cmds | postuninstall_cmds | \ + old_postinstall_cmds | old_postuninstall_cmds | \ + sys_lib_search_path_spec | sys_lib_dlsearch_path_spec) + # Double-quote double-evaled strings. 
+ eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" + ;; + *) + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\"" + ;; + esac + done + + case $lt_echo in + *'\$0 --fallback-echo"') + lt_echo=`$echo "X$lt_echo" | $Xsed -e 's/\\\\\\\$0 --fallback-echo"$/$0 --fallback-echo"/'` + ;; + esac + +cfgfile="$ofile" + + cat <<__EOF__ >> "$cfgfile" +# ### BEGIN LIBTOOL TAG CONFIG: $tagname + +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc_RC + +# Whether or not to disallow shared libs when runtime libs are static +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_RC + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# An echo program that does not interpret backslashes. +echo=$lt_echo + +# The archiver. +AR=$lt_AR +AR_FLAGS=$lt_AR_FLAGS + +# A C compiler. +LTCC=$lt_LTCC + +# LTCC compiler flags. +LTCFLAGS=$lt_LTCFLAGS + +# A language-specific compiler. +CC=$lt_compiler_RC + +# Is the compiler the GNU C compiler? +with_gcc=$GCC_RC + +# An ERE matcher. +EGREP=$lt_EGREP + +# The linker used to build libraries. +LD=$lt_LD_RC + +# Whether we need hard or soft links. +LN_S=$lt_LN_S + +# A BSD-compatible nm program. +NM=$lt_NM + +# A symbol stripping program +STRIP=$lt_STRIP + +# Used to examine libraries when file_magic_cmd begins "file" +MAGIC_CMD=$MAGIC_CMD + +# Used on cygwin: DLL creation program. +DLLTOOL="$DLLTOOL" + +# Used on cygwin: object dumper. +OBJDUMP="$OBJDUMP" + +# Used on cygwin: assembler. +AS="$AS" + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl_RC + +# Object file suffix (normally "o"). +objext="$ac_objext" + +# Old archive suffix (normally "a"). +libext="$libext" + +# Shared library suffix (normally ".so"). +shrext_cmds='$shrext_cmds' + +# Executable file suffix (normally ""). +exeext="$exeext" + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic_RC +pic_mode=$pic_mode + +# What is the maximum length of a command? +max_cmd_len=$lt_cv_sys_max_cmd_len + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o_RC + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Do we need the lib prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Compiler flag to prevent dynamic linking. 
+link_static_flag=$lt_lt_prog_compiler_static_RC + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_RC + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_RC + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec_RC + +# Compiler flag to generate thread-safe objects. +thread_safe_flag_spec=$lt_thread_safe_flag_spec_RC + +# Library versioning type. +version_type=$version_type + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME. +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Commands used to build and install an old-style archive. +RANLIB=$lt_RANLIB +old_archive_cmds=$lt_old_archive_cmds_RC +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_RC + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_RC + +# Commands used to build and install a shared archive. +archive_cmds=$lt_archive_cmds_RC +archive_expsym_cmds=$lt_archive_expsym_cmds_RC +postinstall_cmds=$lt_postinstall_cmds +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to build a loadable module (assumed same as above if empty) +module_cmds=$lt_module_cmds_RC +module_expsym_cmds=$lt_module_expsym_cmds_RC + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + +# Dependencies to place before the objects being linked to create a +# shared library. +predep_objects=$lt_predep_objects_RC + +# Dependencies to place after the objects being linked to create a +# shared library. +postdep_objects=$lt_postdep_objects_RC + +# Dependencies to place before the objects being linked to create a +# shared library. +predeps=$lt_predeps_RC + +# Dependencies to place after the objects being linked to create a +# shared library. +postdeps=$lt_postdeps_RC + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path_RC + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method == file_magic. +file_magic_cmd=$lt_file_magic_cmd + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag_RC + +# Flag that forces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag_RC + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# Same as above, but a single script fragment to be evaled but not shown. +finish_eval=$lt_finish_eval + +# Take the output of nm and produce a listing of raw symbols and C names. 
+global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm in a C name address pair +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# This is the shared library runtime path variable. +runpath_var=$runpath_var + +# This is the shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action_RC + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist. +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_RC + +# If ld is used when linking, flag to hardcode \$libdir into +# a binary during linking. This must work even if \$libdir does +# not exist. +hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld_RC + +# Whether we need a single -rpath flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator_RC + +# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the +# resulting binary. +hardcode_direct=$hardcode_direct_RC + +# Set to yes if using the -LDIR flag during linking hardcodes DIR into the +# resulting binary. +hardcode_minus_L=$hardcode_minus_L_RC + +# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into +# the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var_RC + +# Set to yes if building a shared library automatically hardcodes DIR into the library +# and all subsequent libraries and executables linked against it. +hardcode_automatic=$hardcode_automatic_RC + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at relink time. +variables_saved_for_relink="$variables_saved_for_relink" + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs_RC + +# Compile-time system search path for libraries +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Run-time system search path for libraries +sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + +# Fix the shell variable \$srcfile for the compiler. +fix_srcfile_path="$fix_srcfile_path_RC" + +# Set to yes if exported symbols are required. +always_export_symbols=$always_export_symbols_RC + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds_RC + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms_RC + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms_RC + +# ### END LIBTOOL TAG CONFIG: $tagname + +__EOF__ + + +else + # If there is no Makefile yet, we rely on a make rule to execute + # `config.status --recheck' to rerun these tests and create the + # libtool script then. 
+ ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` + if test -f "$ltmain_in"; then + test -f Makefile && make "$ltmain" + fi +fi + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +CC="$lt_save_CC" + + ;; + + *) + { { echo "$as_me:$LINENO: error: Unsupported tag name: $tagname" >&5 +echo "$as_me: error: Unsupported tag name: $tagname" >&2;} + { (exit 1); exit 1; }; } + ;; + esac + + # Append the new tag name to the list of available tags. + if test -n "$tagname" ; then + available_tags="$available_tags $tagname" + fi + fi + done + IFS="$lt_save_ifs" + + # Now substitute the updated list of available tags. + if eval "sed -e 's/^available_tags=.*\$/available_tags=\"$available_tags\"/' \"$ofile\" > \"${ofile}T\""; then + mv "${ofile}T" "$ofile" + chmod +x "$ofile" + else + rm -f "${ofile}T" + { { echo "$as_me:$LINENO: error: unable to update list of available tagged configurations." >&5 +echo "$as_me: error: unable to update list of available tagged configurations." >&2;} + { (exit 1); exit 1; }; } + fi +fi + + + +# This can be used to rebuild libtool when needed +LIBTOOL_DEPS="$ac_aux_dir/ltmain.sh" + +# Always use our own libtool. +LIBTOOL='$(SHELL) $(top_builddir)/libtool' + +# Prevent multiple expansion + + + + + + + + + + + + + + + + + + + + +# Find a good install program. We prefer a C program (faster), +# so one script is as good as another. But avoid the broken or +# incompatible versions: +# SysV /etc/install, /usr/sbin/install +# SunOS /usr/etc/install +# IRIX /sbin/install +# AIX /bin/install +# AmigaOS /C/install, which installs bootblocks on floppy discs +# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag +# AFS /usr/afsws/bin/install, which mishandles nonexistent args +# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" +# OS/2's system install, which has a completely different semantic +# ./install, which can be erroneously created by make from ./install.sh. +{ echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5 +echo $ECHO_N "checking for a BSD-compatible install... $ECHO_C" >&6; } +if test -z "$INSTALL"; then +if test "${ac_cv_path_install+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + # Account for people who put trailing slashes in PATH elements. +case $as_dir/ in + ./ | .// | /cC/* | \ + /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ + ?:\\/os2\\/install\\/* | ?:\\/OS2\\/INSTALL\\/* | \ + /usr/ucb/* ) ;; + *) + # OSF1 and SCO ODT 3.0 have their own names for install. + # Don't use installbsd from OSF since it installs stuff as root + # by default. + for ac_prog in ginstall scoinst install; do + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then + if test $ac_prog = install && + grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # AIX install. It has an incompatible calling convention. + : + elif test $ac_prog = install && + grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # program-specific install script used by HP pwplus--don't use. 
+ : + else + ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" + break 3 + fi + fi + done + done + ;; +esac +done +IFS=$as_save_IFS + + +fi + if test "${ac_cv_path_install+set}" = set; then + INSTALL=$ac_cv_path_install + else + # As a last resort, use the slow shell script. Don't cache a + # value for INSTALL within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the value is a relative name. + INSTALL=$ac_install_sh + fi +fi +{ echo "$as_me:$LINENO: result: $INSTALL" >&5 +echo "${ECHO_T}$INSTALL" >&6; } + +# Use test -z because SunOS4 sh mishandles braces in ${var-val}. +# It thinks the first close brace ends the variable substitution. +test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' + +test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' + +test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' + +for ac_prog in gawk mawk nawk awk +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_AWK+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$AWK"; then + ac_cv_prog_AWK="$AWK" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_AWK="$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +AWK=$ac_cv_prog_AWK +if test -n "$AWK"; then + { echo "$as_me:$LINENO: result: $AWK" >&5 +echo "${ECHO_T}$AWK" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + + test -n "$AWK" && break +done + + +######### +# Set up an appropriate program prefix +# +if test "$program_prefix" = "NONE"; then + program_prefix="" +fi + + +VERSION=`cat $srcdir/VERSION | sed 's/^\([0-9]*\.*[0-9]*\).*/\1/'` +echo "Version set to $VERSION" + +RELEASE=`cat $srcdir/VERSION` +echo "Release set to $RELEASE" + +VERSION_NUMBER=`cat $srcdir/VERSION \ + | sed 's/[^0-9]/ /g' \ + | awk '{printf "%d%03d%03d",$1,$2,$3}'` +echo "Version number set to $VERSION_NUMBER" + + +######### +# Check to see if the --with-hints=FILE option is used. If there is none, +# then check for a files named "$host.hints" and ../$hosts.hints where +# $host is the hostname of the build system. If still no hints are +# found, try looking in $system.hints and ../$system.hints where +# $system is the result of uname -s. +# + +# Check whether --with-hints was given. +if test "${with_hints+set}" = set; then + withval=$with_hints; hints=$withval +fi + +if test "$hints" = ""; then + host=`hostname | sed 's/\..*//'` + if test -r $host.hints; then + hints=$host.hints + else + if test -r ../$host.hints; then + hints=../$host.hints + fi + fi +fi +if test "$hints" = ""; then + sys=`uname -s` + if test -r $sys.hints; then + hints=$sys.hints + else + if test -r ../$sys.hints; then + hints=../$sys.hints + fi + fi +fi +if test "$hints" != ""; then + { echo "$as_me:$LINENO: result: reading hints from $hints" >&5 +echo "${ECHO_T}reading hints from $hints" >&6; } + . $hints +fi + +######### +# Locate a compiler for the build machine. 
This compiler should +# generate command-line programs that run on the build machine. +# +if test x"$cross_compiling" = xno; then + BUILD_CC=$CC + BUILD_CFLAGS=$CFLAGS +else + if test "${BUILD_CC+set}" != set; then + for ac_prog in gcc cc cl +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_BUILD_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$BUILD_CC"; then + ac_cv_prog_BUILD_CC="$BUILD_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_BUILD_CC="$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +BUILD_CC=$ac_cv_prog_BUILD_CC +if test -n "$BUILD_CC"; then + { echo "$as_me:$LINENO: result: $BUILD_CC" >&5 +echo "${ECHO_T}$BUILD_CC" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + + test -n "$BUILD_CC" && break +done + + fi + if test "${BUILD_CFLAGS+set}" != set; then + BUILD_CFLAGS="-g" + fi +fi + + + +########## +# Do we want to support multithreaded use of sqlite +# +# Check whether --enable-threadsafe was given. +if test "${enable_threadsafe+set}" = set; then + enableval=$enable_threadsafe; +else + enable_threadsafe=yes +fi + +{ echo "$as_me:$LINENO: checking whether to support threadsafe operation" >&5 +echo $ECHO_N "checking whether to support threadsafe operation... $ECHO_C" >&6; } +if test "$enable_threadsafe" = "no"; then + SQLITE_THREADSAFE=0 + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +else + SQLITE_THREADSAFE=1 + { echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6; } +fi + + +if test "$SQLITE_THREADSAFE" = "1"; then + LIBS="" + +{ echo "$as_me:$LINENO: checking for pthread_create in -lpthread" >&5 +echo $ECHO_N "checking for pthread_create in -lpthread... $ECHO_C" >&6; } +if test "${ac_cv_lib_pthread_pthread_create+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lpthread $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char pthread_create (); +int +main () +{ +return pthread_create (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + ac_cv_lib_pthread_pthread_create=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_lib_pthread_pthread_create=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ echo "$as_me:$LINENO: result: $ac_cv_lib_pthread_pthread_create" >&5 +echo "${ECHO_T}$ac_cv_lib_pthread_pthread_create" >&6; } +if test $ac_cv_lib_pthread_pthread_create = yes; then + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBPTHREAD 1 +_ACEOF + + LIBS="-lpthread $LIBS" + +fi + + TARGET_THREAD_LIB="$LIBS" + LIBS="" +else + TARGET_THREAD_LIB="" +fi + + +########## +# Do we want to allow a connection created in one thread to be used +# in another thread. This does not work on many Linux systems (ex: RedHat 9) +# due to bugs in the threading implementations. This is thus off by default. +# +# Check whether --enable-cross-thread-connections was given. +if test "${enable_cross_thread_connections+set}" = set; then + enableval=$enable_cross_thread_connections; +else + enable_xthreadconnect=no +fi + +{ echo "$as_me:$LINENO: checking whether to allow connections to be shared across threads" >&5 +echo $ECHO_N "checking whether to allow connections to be shared across threads... $ECHO_C" >&6; } +if test "$enable_xthreadconnect" = "no"; then + XTHREADCONNECT='' + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +else + XTHREADCONNECT='-DSQLITE_ALLOW_XTHREAD_CONNECT=1' + { echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6; } +fi + + +########## +# Do we want to set threadsOverrideEachOthersLocks variable to be 1 (true) by +# default. Normally, a test at runtime is performed to determine the +# appropriate value of this variable. Use this option only if you're sure that +# threads can safely override each others locks in all runtime situations. +# +# Check whether --enable-threads-override-locks was given. +if test "${enable_threads_override_locks+set}" = set; then + enableval=$enable_threads_override_locks; +else + enable_threads_override_locks=no +fi + +{ echo "$as_me:$LINENO: checking whether threads can override each others locks" >&5 +echo $ECHO_N "checking whether threads can override each others locks... $ECHO_C" >&6; } +if test "$enable_threads_override_locks" = "no"; then + THREADSOVERRIDELOCKS='-1' + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +else + THREADSOVERRIDELOCKS='1' + { echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6; } +fi + + +########## +# Do we want to support release +# +# Check whether --enable-releasemode was given. +if test "${enable_releasemode+set}" = set; then + enableval=$enable_releasemode; +else + enable_releasemode=no +fi + +{ echo "$as_me:$LINENO: checking whether to support shared library linked as release mode or not" >&5 +echo $ECHO_N "checking whether to support shared library linked as release mode or not... $ECHO_C" >&6; } +if test "$enable_releasemode" = "no"; then + ALLOWRELEASE="" + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +else + ALLOWRELEASE="-release `cat VERSION`" + { echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6; } +fi + + +########## +# Do we want temporary databases in memory +# +# Check whether --enable-tempstore was given. 
+if test "${enable_tempstore+set}" = set; then + enableval=$enable_tempstore; +else + enable_tempstore=no +fi + +{ echo "$as_me:$LINENO: checking whether to use an in-ram database for temporary tables" >&5 +echo $ECHO_N "checking whether to use an in-ram database for temporary tables... $ECHO_C" >&6; } +case "$enable_tempstore" in + never ) + TEMP_STORE=0 + { echo "$as_me:$LINENO: result: never" >&5 +echo "${ECHO_T}never" >&6; } + ;; + no ) + TEMP_STORE=1 + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } + ;; + always ) + TEMP_STORE=3 + { echo "$as_me:$LINENO: result: always" >&5 +echo "${ECHO_T}always" >&6; } + ;; + yes ) + TEMP_STORE=3 + { echo "$as_me:$LINENO: result: always" >&5 +echo "${ECHO_T}always" >&6; } + ;; + * ) + TEMP_STORE=1 + { echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6; } + ;; +esac + + + +########### +# Lots of things are different if we are compiling for Windows using +# the CYGWIN environment. So check for that special case and handle +# things accordingly. +# +{ echo "$as_me:$LINENO: checking if executables have the .exe suffix" >&5 +echo $ECHO_N "checking if executables have the .exe suffix... $ECHO_C" >&6; } +if test "$config_BUILD_EXEEXT" = ".exe"; then + CYGWIN=yes + { echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6; } +else + { echo "$as_me:$LINENO: result: unknown" >&5 +echo "${ECHO_T}unknown" >&6; } +fi +if test "$CYGWIN" != "yes"; then + { echo "$as_me:$LINENO: checking host system type" >&5 +echo $ECHO_N "checking host system type... $ECHO_C" >&6; } +if test "${ac_cv_host+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test "x$host_alias" = x; then + ac_cv_host=$ac_cv_build +else + ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || + { { echo "$as_me:$LINENO: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&5 +echo "$as_me: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&2;} + { (exit 1); exit 1; }; } +fi + +fi +{ echo "$as_me:$LINENO: result: $ac_cv_host" >&5 +echo "${ECHO_T}$ac_cv_host" >&6; } +case $ac_cv_host in +*-*-*) ;; +*) { { echo "$as_me:$LINENO: error: invalid value of canonical host" >&5 +echo "$as_me: error: invalid value of canonical host" >&2;} + { (exit 1); exit 1; }; };; +esac +host=$ac_cv_host +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_host +shift +host_cpu=$1 +host_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +host_os=$* +IFS=$ac_save_IFS +case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac + + +case $host_os in + *cygwin* ) CYGWIN=yes;; + * ) CYGWIN=no;; +esac + +fi +if test "$CYGWIN" = "yes"; then + BUILD_EXEEXT=.exe +else + BUILD_EXEEXT=$EXEEXT +fi +if test x"$cross_compiling" = xno; then + TARGET_EXEEXT=$BUILD_EXEEXT +else + TARGET_EXEEXT=$config_TARGET_EXEEXT +fi +if test "$TARGET_EXEEXT" = ".exe"; then + if test $OS2_SHELL ; then + OS_UNIX=0 + OS_WIN=0 + OS_OS2=1 + TARGET_CFLAGS="$TARGET_CFLAGS -DOS_OS2=1" + if test "$ac_compiler_gnu" == "yes" ; then + TARGET_CFLAGS="$TARGET_CFLAGS -Zomf -Zexe -Zmap" + BUILD_CFLAGS="$BUILD_CFLAGS -Zomf -Zexe" + fi + else + OS_UNIX=0 + OS_WIN=1 + OS_OS2=0 + tclsubdir=win + TARGET_CFLAGS="$TARGET_CFLAGS -DOS_WIN=1" + fi +else + OS_UNIX=1 + OS_WIN=0 + OS_OS2=0 + tclsubdir=unix + TARGET_CFLAGS="$TARGET_CFLAGS -DOS_UNIX=1" +fi + + + + + + + +########## +# Figure out all the parameters needed to compile against Tcl. 
+# +# This code is derived from the SC_PATH_TCLCONFIG and SC_LOAD_TCLCONFIG +# macros in the in the tcl.m4 file of the standard TCL distribution. +# Those macros could not be used directly since we have to make some +# minor changes to accomodate systems that do not have TCL installed. +# +# Check whether --enable-tcl was given. +if test "${enable_tcl+set}" = set; then + enableval=$enable_tcl; use_tcl=$enableval +else + use_tcl=yes +fi + +if test "${use_tcl}" = "yes" ; then + +# Check whether --with-tcl was given. +if test "${with_tcl+set}" = set; then + withval=$with_tcl; with_tclconfig=${withval} +fi + + { echo "$as_me:$LINENO: checking for Tcl configuration" >&5 +echo $ECHO_N "checking for Tcl configuration... $ECHO_C" >&6; } + if test "${ac_cv_c_tclconfig+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + + # First check to see if --with-tcl was specified. + if test x"${with_tclconfig}" != x ; then + if test -f "${with_tclconfig}/tclConfig.sh" ; then + ac_cv_c_tclconfig=`(cd ${with_tclconfig}; pwd)` + else + { { echo "$as_me:$LINENO: error: ${with_tclconfig} directory doesn't contain tclConfig.sh" >&5 +echo "$as_me: error: ${with_tclconfig} directory doesn't contain tclConfig.sh" >&2;} + { (exit 1); exit 1; }; } + fi + fi + # then check for a private Tcl installation + if test x"${ac_cv_c_tclconfig}" = x ; then + for i in \ + ../tcl \ + `ls -dr ../tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ + `ls -dr ../tcl[8-9].[0-9] 2>/dev/null` \ + `ls -dr ../tcl[8-9].[0-9]* 2>/dev/null` \ + ../../tcl \ + `ls -dr ../../tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ + `ls -dr ../../tcl[8-9].[0-9] 2>/dev/null` \ + `ls -dr ../../tcl[8-9].[0-9]* 2>/dev/null` \ + ../../../tcl \ + `ls -dr ../../../tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ + `ls -dr ../../../tcl[8-9].[0-9] 2>/dev/null` \ + `ls -dr ../../../tcl[8-9].[0-9]* 2>/dev/null` + do + if test -f "$i/unix/tclConfig.sh" ; then + ac_cv_c_tclconfig=`(cd $i/unix; pwd)` + break + fi + done + fi + + # check in a few common install locations + if test x"${ac_cv_c_tclconfig}" = x ; then + for i in \ + `ls -d ${libdir} 2>/dev/null` \ + `ls -d /usr/local/lib 2>/dev/null` \ + `ls -d /usr/contrib/lib 2>/dev/null` \ + `ls -d /usr/lib 2>/dev/null` + do + if test -f "$i/tclConfig.sh" ; then + ac_cv_c_tclconfig=`(cd $i; pwd)` + break + fi + done + fi + + # check in a few other private locations + if test x"${ac_cv_c_tclconfig}" = x ; then + for i in \ + ${srcdir}/../tcl \ + `ls -dr ${srcdir}/../tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ + `ls -dr ${srcdir}/../tcl[8-9].[0-9] 2>/dev/null` \ + `ls -dr ${srcdir}/../tcl[8-9].[0-9]* 2>/dev/null` + do + if test -f "$i/unix/tclConfig.sh" ; then + ac_cv_c_tclconfig=`(cd $i/unix; pwd)` + break + fi + done + fi + +fi + + + if test x"${ac_cv_c_tclconfig}" = x ; then + use_tcl=no + { echo "$as_me:$LINENO: WARNING: Can't find Tcl configuration definitions" >&5 +echo "$as_me: WARNING: Can't find Tcl configuration definitions" >&2;} + { echo "$as_me:$LINENO: WARNING: *** Without Tcl the regression tests cannot be executed ***" >&5 +echo "$as_me: WARNING: *** Without Tcl the regression tests cannot be executed ***" >&2;} + { echo "$as_me:$LINENO: WARNING: *** Consider using --with-tcl=... to define location of Tcl ***" >&5 +echo "$as_me: WARNING: *** Consider using --with-tcl=... 
to define location of Tcl ***" >&2;} + else + TCL_BIN_DIR=${ac_cv_c_tclconfig} + { echo "$as_me:$LINENO: result: found $TCL_BIN_DIR/tclConfig.sh" >&5 +echo "${ECHO_T}found $TCL_BIN_DIR/tclConfig.sh" >&6; } + + { echo "$as_me:$LINENO: checking for existence of $TCL_BIN_DIR/tclConfig.sh" >&5 +echo $ECHO_N "checking for existence of $TCL_BIN_DIR/tclConfig.sh... $ECHO_C" >&6; } + if test -f "$TCL_BIN_DIR/tclConfig.sh" ; then + { echo "$as_me:$LINENO: result: loading" >&5 +echo "${ECHO_T}loading" >&6; } + . $TCL_BIN_DIR/tclConfig.sh + else + { echo "$as_me:$LINENO: result: file not found" >&5 +echo "${ECHO_T}file not found" >&6; } + fi + + # + # If the TCL_BIN_DIR is the build directory (not the install directory), + # then set the common variable name to the value of the build variables. + # For example, the variable TCL_LIB_SPEC will be set to the value + # of TCL_BUILD_LIB_SPEC. An extension should make use of TCL_LIB_SPEC + # instead of TCL_BUILD_LIB_SPEC since it will work with both an + # installed and uninstalled version of Tcl. + # + + if test -f $TCL_BIN_DIR/Makefile ; then + TCL_LIB_SPEC=${TCL_BUILD_LIB_SPEC} + TCL_STUB_LIB_SPEC=${TCL_BUILD_STUB_LIB_SPEC} + TCL_STUB_LIB_PATH=${TCL_BUILD_STUB_LIB_PATH} + fi + + # + # eval is required to do the TCL_DBGX substitution + # + + eval "TCL_LIB_FILE=\"${TCL_LIB_FILE}\"" + eval "TCL_LIB_FLAG=\"${TCL_LIB_FLAG}\"" + eval "TCL_LIB_SPEC=\"${TCL_LIB_SPEC}\"" + + eval "TCL_STUB_LIB_FILE=\"${TCL_STUB_LIB_FILE}\"" + eval "TCL_STUB_LIB_FLAG=\"${TCL_STUB_LIB_FLAG}\"" + eval "TCL_STUB_LIB_SPEC=\"${TCL_STUB_LIB_SPEC}\"" + + + + + + + + + + + + + + + fi +fi +if test "${use_tcl}" = "no" ; then + HAVE_TCL="" +else + HAVE_TCL=1 +fi + + +########## +# Figure out what C libraries are required to compile programs +# that use "readline()" library. +# +TARGET_READLINE_LIBS="" +TARGET_READLINE_INC="" +TARGET_HAVE_READLINE=0 +# Check whether --enable-readline was given. +if test "${enable_readline+set}" = set; then + enableval=$enable_readline; with_readline=$enableval +else + with_readline=auto +fi + + +if test x"$with_readline" != xno; then + found="yes" + + +# Check whether --with-readline-lib was given. +if test "${with_readline_lib+set}" = set; then + withval=$with_readline_lib; with_readline_lib=$withval +else + with_readline_lib="auto" +fi + + if test "x$with_readline_lib" = xauto; then + save_LIBS="$LIBS" + LIBS="" + { echo "$as_me:$LINENO: checking for library containing tgetent" >&5 +echo $ECHO_N "checking for library containing tgetent... $ECHO_C" >&6; } +if test "${ac_cv_search_tgetent+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_func_search_save_LIBS=$LIBS +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char tgetent (); +int +main () +{ +return tgetent (); + ; + return 0; +} +_ACEOF +for ac_lib in '' readline ncurses curses termcap; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + ac_cv_search_tgetent=$ac_res +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext + if test "${ac_cv_search_tgetent+set}" = set; then + break +fi +done +if test "${ac_cv_search_tgetent+set}" = set; then + : +else + ac_cv_search_tgetent=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ echo "$as_me:$LINENO: result: $ac_cv_search_tgetent" >&5 +echo "${ECHO_T}$ac_cv_search_tgetent" >&6; } +ac_res=$ac_cv_search_tgetent +if test "$ac_res" != no; then + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + term_LIBS="$LIBS" +else + term_LIBS="" +fi + + { echo "$as_me:$LINENO: checking for readline in -lreadline" >&5 +echo $ECHO_N "checking for readline in -lreadline... $ECHO_C" >&6; } +if test "${ac_cv_lib_readline_readline+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lreadline $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char readline (); +int +main () +{ +return readline (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + ac_cv_lib_readline_readline=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_lib_readline_readline=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ echo "$as_me:$LINENO: result: $ac_cv_lib_readline_readline" >&5 +echo "${ECHO_T}$ac_cv_lib_readline_readline" >&6; } +if test $ac_cv_lib_readline_readline = yes; then + TARGET_READLINE_LIBS="-lreadline" +else + found="no" +fi + + TARGET_READLINE_LIBS="$TARGET_READLINE_LIBS $term_LIBS" + LIBS="$save_LIBS" + else + TARGET_READLINE_LIBS="$with_readline_lib" + fi + + +# Check whether --with-readline-inc was given. +if test "${with_readline_inc+set}" = set; then + withval=$with_readline_inc; with_readline_inc=$withval +else + with_readline_inc="auto" +fi + + if test "x$with_readline_inc" = xauto; then + if test "${ac_cv_header_readline_h+set}" = set; then + { echo "$as_me:$LINENO: checking for readline.h" >&5 +echo $ECHO_N "checking for readline.h... $ECHO_C" >&6; } +if test "${ac_cv_header_readline_h+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +fi +{ echo "$as_me:$LINENO: result: $ac_cv_header_readline_h" >&5 +echo "${ECHO_T}$ac_cv_header_readline_h" >&6; } +else + # Is the header compilable? +{ echo "$as_me:$LINENO: checking readline.h usability" >&5 +echo $ECHO_N "checking readline.h usability... $ECHO_C" >&6; } +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +#include +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_header_compiler=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_compiler=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 +echo "${ECHO_T}$ac_header_compiler" >&6; } + +# Is the header present? +{ echo "$as_me:$LINENO: checking readline.h presence" >&5 +echo $ECHO_N "checking readline.h presence... $ECHO_C" >&6; } +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! 
-s conftest.err + }; then + ac_header_preproc=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_preproc=no +fi + +rm -f conftest.err conftest.$ac_ext +{ echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 +echo "${ECHO_T}$ac_header_preproc" >&6; } + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in + yes:no: ) + { echo "$as_me:$LINENO: WARNING: readline.h: accepted by the compiler, rejected by the preprocessor!" >&5 +echo "$as_me: WARNING: readline.h: accepted by the compiler, rejected by the preprocessor!" >&2;} + { echo "$as_me:$LINENO: WARNING: readline.h: proceeding with the compiler's result" >&5 +echo "$as_me: WARNING: readline.h: proceeding with the compiler's result" >&2;} + ac_header_preproc=yes + ;; + no:yes:* ) + { echo "$as_me:$LINENO: WARNING: readline.h: present but cannot be compiled" >&5 +echo "$as_me: WARNING: readline.h: present but cannot be compiled" >&2;} + { echo "$as_me:$LINENO: WARNING: readline.h: check for missing prerequisite headers?" >&5 +echo "$as_me: WARNING: readline.h: check for missing prerequisite headers?" >&2;} + { echo "$as_me:$LINENO: WARNING: readline.h: see the Autoconf documentation" >&5 +echo "$as_me: WARNING: readline.h: see the Autoconf documentation" >&2;} + { echo "$as_me:$LINENO: WARNING: readline.h: section \"Present But Cannot Be Compiled\"" >&5 +echo "$as_me: WARNING: readline.h: section \"Present But Cannot Be Compiled\"" >&2;} + { echo "$as_me:$LINENO: WARNING: readline.h: proceeding with the preprocessor's result" >&5 +echo "$as_me: WARNING: readline.h: proceeding with the preprocessor's result" >&2;} + { echo "$as_me:$LINENO: WARNING: readline.h: in the future, the compiler will take precedence" >&5 +echo "$as_me: WARNING: readline.h: in the future, the compiler will take precedence" >&2;} + + ;; +esac +{ echo "$as_me:$LINENO: checking for readline.h" >&5 +echo $ECHO_N "checking for readline.h... $ECHO_C" >&6; } +if test "${ac_cv_header_readline_h+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_cv_header_readline_h=$ac_header_preproc +fi +{ echo "$as_me:$LINENO: result: $ac_cv_header_readline_h" >&5 +echo "${ECHO_T}$ac_cv_header_readline_h" >&6; } + +fi +if test $ac_cv_header_readline_h = yes; then + found="yes" +else + + found="no" + if test "$cross_compiling" != yes; then + for dir in /usr /usr/local /usr/local/readline /usr/contrib /mingw; do + for subdir in include include/readline; do + as_ac_File=`echo "ac_cv_file_$dir/$subdir/readline.h" | $as_tr_sh` +{ echo "$as_me:$LINENO: checking for $dir/$subdir/readline.h" >&5 +echo $ECHO_N "checking for $dir/$subdir/readline.h... 
$ECHO_C" >&6; } +if { as_var=$as_ac_File; eval "test \"\${$as_var+set}\" = set"; }; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + test "$cross_compiling" = yes && + { { echo "$as_me:$LINENO: error: cannot check for file existence when cross compiling" >&5 +echo "$as_me: error: cannot check for file existence when cross compiling" >&2;} + { (exit 1); exit 1; }; } +if test -r "$dir/$subdir/readline.h"; then + eval "$as_ac_File=yes" +else + eval "$as_ac_File=no" +fi +fi +ac_res=`eval echo '${'$as_ac_File'}'` + { echo "$as_me:$LINENO: result: $ac_res" >&5 +echo "${ECHO_T}$ac_res" >&6; } +if test `eval echo '${'$as_ac_File'}'` = yes; then + found=yes +fi + + if test "$found" = "yes"; then + TARGET_READLINE_INC="-I$dir/$subdir" + break + fi + done + test "$found" = "yes" && break + done + fi + +fi + + + else + TARGET_READLINE_INC="$with_readline_inc" + fi + + if test x"$found" = xno; then + TARGET_READLINE_LIBS="" + TARGET_READLINE_INC="" + TARGET_HAVE_READLINE=0 + else + TARGET_HAVE_READLINE=1 + fi +fi + + + + + +########## +# Figure out what C libraries are required to compile programs +# that use "fdatasync()" function. +# +{ echo "$as_me:$LINENO: checking for library containing fdatasync" >&5 +echo $ECHO_N "checking for library containing fdatasync... $ECHO_C" >&6; } +if test "${ac_cv_search_fdatasync+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_func_search_save_LIBS=$LIBS +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char fdatasync (); +int +main () +{ +return fdatasync (); + ; + return 0; +} +_ACEOF +for ac_lib in '' rt; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + ac_cv_search_fdatasync=$ac_res +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext + if test "${ac_cv_search_fdatasync+set}" = set; then + break +fi +done +if test "${ac_cv_search_fdatasync+set}" = set; then + : +else + ac_cv_search_fdatasync=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ echo "$as_me:$LINENO: result: $ac_cv_search_fdatasync" >&5 +echo "${ECHO_T}$ac_cv_search_fdatasync" >&6; } +ac_res=$ac_cv_search_fdatasync +if test "$ac_res" != no; then + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +fi + + +######### +# check for debug enabled +# Check whether --enable-debug was given. 
+if test "${enable_debug+set}" = set; then + enableval=$enable_debug; use_debug=$enableval +else + use_debug=no +fi + +if test "${use_debug}" = "yes" ; then + TARGET_DEBUG="-DSQLITE_DEBUG=1" +else + TARGET_DEBUG="-DNDEBUG" +fi + + +######### +# Figure out whether or not we have a "usleep()" function. +# +{ echo "$as_me:$LINENO: checking for usleep" >&5 +echo $ECHO_N "checking for usleep... $ECHO_C" >&6; } +if test "${ac_cv_func_usleep+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define usleep to an innocuous variant, in case declares usleep. + For example, HP-UX 11i declares gettimeofday. */ +#define usleep innocuous_usleep + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char usleep (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef usleep + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char usleep (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined __stub_usleep || defined __stub___usleep +choke me +#endif + +int +main () +{ +return usleep (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + ac_cv_func_usleep=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_func_usleep=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +fi +{ echo "$as_me:$LINENO: result: $ac_cv_func_usleep" >&5 +echo "${ECHO_T}$ac_cv_func_usleep" >&6; } +if test $ac_cv_func_usleep = yes; then + TARGET_CFLAGS="$TARGET_CFLAGS -DHAVE_USLEEP=1" +fi + + +#-------------------------------------------------------------------- +# Redefine fdatasync as fsync on systems that lack fdatasync +#-------------------------------------------------------------------- + +{ echo "$as_me:$LINENO: checking for fdatasync" >&5 +echo $ECHO_N "checking for fdatasync... $ECHO_C" >&6; } +if test "${ac_cv_func_fdatasync+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define fdatasync to an innocuous variant, in case declares fdatasync. + For example, HP-UX 11i declares gettimeofday. */ +#define fdatasync innocuous_fdatasync + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char fdatasync (); below. 
+ Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef fdatasync + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char fdatasync (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined __stub_fdatasync || defined __stub___fdatasync +choke me +#endif + +int +main () +{ +return fdatasync (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + ac_cv_func_fdatasync=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_func_fdatasync=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +fi +{ echo "$as_me:$LINENO: result: $ac_cv_func_fdatasync" >&5 +echo "${ECHO_T}$ac_cv_func_fdatasync" >&6; } +if test $ac_cv_func_fdatasync = yes; then + TARGET_CFLAGS="$TARGET_CFLAGS -DHAVE_FDATASYNC=1" +fi + + +######### +# Generate the output files. +# +ac_config_files="$ac_config_files Makefile sqlite3.pc" + +cat >confcache <<\_ACEOF +# This file is a shell script that caches the results of configure +# tests run on this system so they can be shared between configure +# scripts and configure runs, see configure's option --config-cache. +# It is not useful on other systems. If it contains results you don't +# want to keep, you may remove or edit it. +# +# config.status only pays attention to the cache file if you give it +# the --recheck option to rerun configure. +# +# `ac_cv_env_foo' variables (set or unset) will be overridden when +# loading this file, other *unset* `ac_cv_foo' will be assigned the +# following values. + +_ACEOF + +# The following way of writing the cache mishandles newlines in values, +# but we know of no workaround that is simple, portable, and efficient. +# So, we kill variables containing newlines. +# Ultrix sh set writes to stderr and can't be redirected directly, +# and sets the high bit in the cache file unless we assign to the vars. +( + for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { echo "$as_me:$LINENO: WARNING: Cache variable $ac_var contains a newline." >&5 +echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + *) $as_unset $ac_var ;; + esac ;; + esac + done + + (set) 2>&1 | + case $as_nl`(ac_space=' '; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + # `set' does not quote correctly, so add quotes (double-quote + # substitution turns \\\\ into \\, and sed turns \\ into \). 
+ sed -n \ + "s/'/'\\\\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" + ;; #( + *) + # `set' quotes correctly as required by POSIX, so do not add quotes. + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) | + sed ' + /^ac_cv_env_/b end + t clear + :clear + s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ + t end + s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ + :end' >>confcache +if diff "$cache_file" confcache >/dev/null 2>&1; then :; else + if test -w "$cache_file"; then + test "x$cache_file" != "x/dev/null" && + { echo "$as_me:$LINENO: updating cache $cache_file" >&5 +echo "$as_me: updating cache $cache_file" >&6;} + cat confcache >$cache_file + else + { echo "$as_me:$LINENO: not updating unwritable cache $cache_file" >&5 +echo "$as_me: not updating unwritable cache $cache_file" >&6;} + fi +fi +rm -f confcache + +test "x$prefix" = xNONE && prefix=$ac_default_prefix +# Let make expand exec_prefix. +test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' + +# Transform confdefs.h into DEFS. +# Protect against shell expansion while executing Makefile rules. +# Protect against Makefile macro expansion. +# +# If the first sed substitution is executed (which looks for macros that +# take arguments), then branch to the quote section. Otherwise, +# look for a macro that doesn't take arguments. +ac_script=' +t clear +:clear +s/^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*([^)]*)\)[ ]*\(.*\)/-D\1=\2/g +t quote +s/^[ ]*#[ ]*define[ ][ ]*\([^ ][^ ]*\)[ ]*\(.*\)/-D\1=\2/g +t quote +b any +:quote +s/[ `~#$^&*(){}\\|;'\''"<>?]/\\&/g +s/\[/\\&/g +s/\]/\\&/g +s/\$/$$/g +H +:any +${ + g + s/^\n// + s/\n/ /g + p +} +' +DEFS=`sed -n "$ac_script" confdefs.h` + + +ac_libobjs= +ac_ltlibobjs= +for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue + # 1. Remove the extension, and $U if already installed. + ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' + ac_i=`echo "$ac_i" | sed "$ac_script"` + # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR + # will be set to the directory where LIBOBJS objects are built. + ac_libobjs="$ac_libobjs \${LIBOBJDIR}$ac_i\$U.$ac_objext" + ac_ltlibobjs="$ac_ltlibobjs \${LIBOBJDIR}$ac_i"'$U.lo' +done +LIBOBJS=$ac_libobjs + +LTLIBOBJS=$ac_ltlibobjs + + + +: ${CONFIG_STATUS=./config.status} +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files $CONFIG_STATUS" +{ echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5 +echo "$as_me: creating $CONFIG_STATUS" >&6;} +cat >$CONFIG_STATUS <<_ACEOF +#! $SHELL +# Generated by $as_me. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. + +debug=false +ac_cs_recheck=false +ac_cs_silent=false +SHELL=\${CONFIG_SHELL-$SHELL} +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF +## --------------------- ## +## M4sh Initialization. ## +## --------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in + *posix*) set -o posix ;; +esac + +fi + + + + +# PATH needs CR +# Avoid depending upon Character Ranges. 
+as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + echo "#! /bin/sh" >conf$$.sh + echo "exit 0" >>conf$$.sh + chmod +x conf$$.sh + if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then + PATH_SEPARATOR=';' + else + PATH_SEPARATOR=: + fi + rm -f conf$$.sh +fi + +# Support unset when possible. +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + as_unset=unset +else + as_unset=false +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +as_nl=' +' +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +case $0 in + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break +done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + { (exit 1); exit 1; } +fi + +# Work around bugs in pre-3.0 UWIN ksh. +for as_var in ENV MAIL MAILPATH +do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +for as_var in \ + LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \ + LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \ + LC_TELEPHONE LC_TIME +do + if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then + eval $as_var=C; export $as_var + else + ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var + fi +done + +# Required to use basename. +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + + +# Name of the executable. +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# CDPATH. +$as_unset CDPATH + + + + as_lineno_1=$LINENO + as_lineno_2=$LINENO + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || { + + # Create $as_me.lineno as a copy of $as_myself, but with $LINENO + # uniformly replaced by the line number. The first 'sed' inserts a + # line-number line after each line using $LINENO; the second 'sed' + # does the real work. The second script uses 'N' to pair each + # line-number line with the line containing $LINENO, and appends + # trailing '-' during substitution so that $LINENO is not a special + # case at line end. + # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the + # scripts with optimization help from Paolo Bonzini. Blame Lee + # E. McMahon (1931-1989) for sed's syntax. 
:-) + sed -n ' + p + /[$]LINENO/= + ' <$as_myself | + sed ' + s/[$]LINENO.*/&-/ + t lineno + b + :lineno + N + :loop + s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ + t loop + s/-\n.*// + ' >$as_me.lineno && + chmod +x "$as_me.lineno" || + { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 + { (exit 1); exit 1; }; } + + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensitive to this). + . "./$as_me.lineno" + # Exit status is that of the last command. + exit +} + + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in +-n*) + case `echo 'x\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + *) ECHO_C='\c';; + esac;; +*) + ECHO_N='-n';; +esac + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir +fi +echo >conf$$.file +if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -p'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -p' +elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln +else + as_ln_s='cp -p' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + +if mkdir -p . 2>/dev/null; then + as_mkdir_p=: +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +if test -x / >/dev/null 2>&1; then + as_test_x='test -x' +else + if ls -dL / >/dev/null 2>&1; then + as_ls_L_option=L + else + as_ls_L_option= + fi + as_test_x=' + eval sh -c '\'' + if test -d "$1"; then + test -d "$1/."; + else + case $1 in + -*)set "./$1";; + esac; + case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in + ???[sx]*):;;*)false;;esac;fi + '\'' sh + ' +fi +as_executable_p=$as_test_x + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +exec 6>&1 + +# Save the log message, to keep $[0] and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. +ac_log=" +This file was extended by $as_me, which was +generated by GNU Autoconf 2.61. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +on `(hostname || uname -n) 2>/dev/null | sed 1q` +" + +_ACEOF + +cat >>$CONFIG_STATUS <<_ACEOF +# Files that config.status was made for. +config_files="$ac_config_files" + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF +ac_cs_usage="\ +\`$as_me' instantiates files from templates according to the +current configuration. + +Usage: $0 [OPTIONS] [FILE]... 
+ + -h, --help print this help, then exit + -V, --version print version number and configuration settings, then exit + -q, --quiet do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --file=FILE[:TEMPLATE] + instantiate the configuration file FILE + +Configuration files: +$config_files + +Report bugs to ." + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF +ac_cs_version="\\ +config.status +configured by $0, generated by GNU Autoconf 2.61, + with options \\"`echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`\\" + +Copyright (C) 2006 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." + +ac_pwd='$ac_pwd' +srcdir='$srcdir' +INSTALL='$INSTALL' +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF +# If no file are specified by the user, then we need to provide default +# value. By we need to know if files were specified by the user. +ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=*) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` + ac_shift=: + ;; + *) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + esac + + case $ac_option in + # Handling of the options. + -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) + echo "$ac_cs_version"; exit ;; + --debug | --debu | --deb | --de | --d | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + CONFIG_FILES="$CONFIG_FILES $ac_optarg" + ac_need_defaults=false;; + --he | --h | --help | --hel | -h ) + echo "$ac_cs_usage"; exit ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + + # This is an error. + -*) { echo "$as_me: error: unrecognized option: $1 +Try \`$0 --help' for more information." >&2 + { (exit 1); exit 1; }; } ;; + + *) ac_config_targets="$ac_config_targets $1" + ac_need_defaults=false ;; + + esac + shift +done + +ac_configure_extra_args= + +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF +if \$ac_cs_recheck; then + echo "running CONFIG_SHELL=$SHELL $SHELL $0 "$ac_configure_args \$ac_configure_extra_args " --no-create --no-recursion" >&6 + CONFIG_SHELL=$SHELL + export CONFIG_SHELL + exec $SHELL "$0"$ac_configure_args \$ac_configure_extra_args --no-create --no-recursion +fi + +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX + echo "$ac_log" +} >&5 + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF + +# Handling of arguments. +for ac_config_target in $ac_config_targets +do + case $ac_config_target in + "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; + "sqlite3.pc") CONFIG_FILES="$CONFIG_FILES sqlite3.pc" ;; + + *) { { echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5 +echo "$as_me: error: invalid argument: $ac_config_target" >&2;} + { (exit 1); exit 1; }; };; + esac +done + + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. 
+# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. +if $ac_need_defaults; then + test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason against having it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Hook for its removal unless debugging. +# Note that there is a small window in which the directory will not be cleaned: +# after its creation but before its name has been assigned to `$tmp'. +$debug || +{ + tmp= + trap 'exit_status=$? + { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status +' 0 + trap '{ (exit 1); exit 1; }' 1 2 13 15 +} +# Create a (secure) tmp directory for tmp files. + +{ + tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && + test -n "$tmp" && test -d "$tmp" +} || +{ + tmp=./conf$$-$RANDOM + (umask 077 && mkdir "$tmp") +} || +{ + echo "$me: cannot create a temporary directory in ." >&2 + { (exit 1); exit 1; } +} + +# +# Set up the sed scripts for CONFIG_FILES section. +# + +# No need to generate the scripts if there are no CONFIG_FILES. +# This happens for instance when ./config.status config.h +if test -n "$CONFIG_FILES"; then + +_ACEOF + + + +ac_delim='%!_!# ' +for ac_last_try in false false false false false :; do + cat >conf$$subs.sed <<_ACEOF +SHELL!$SHELL$ac_delim +PATH_SEPARATOR!$PATH_SEPARATOR$ac_delim +PACKAGE_NAME!$PACKAGE_NAME$ac_delim +PACKAGE_TARNAME!$PACKAGE_TARNAME$ac_delim +PACKAGE_VERSION!$PACKAGE_VERSION$ac_delim +PACKAGE_STRING!$PACKAGE_STRING$ac_delim +PACKAGE_BUGREPORT!$PACKAGE_BUGREPORT$ac_delim +exec_prefix!$exec_prefix$ac_delim +prefix!$prefix$ac_delim +program_transform_name!$program_transform_name$ac_delim +bindir!$bindir$ac_delim +sbindir!$sbindir$ac_delim +libexecdir!$libexecdir$ac_delim +datarootdir!$datarootdir$ac_delim +datadir!$datadir$ac_delim +sysconfdir!$sysconfdir$ac_delim +sharedstatedir!$sharedstatedir$ac_delim +localstatedir!$localstatedir$ac_delim +includedir!$includedir$ac_delim +oldincludedir!$oldincludedir$ac_delim +docdir!$docdir$ac_delim +infodir!$infodir$ac_delim +htmldir!$htmldir$ac_delim +dvidir!$dvidir$ac_delim +pdfdir!$pdfdir$ac_delim +psdir!$psdir$ac_delim +libdir!$libdir$ac_delim +localedir!$localedir$ac_delim +mandir!$mandir$ac_delim +DEFS!$DEFS$ac_delim +ECHO_C!$ECHO_C$ac_delim +ECHO_N!$ECHO_N$ac_delim +ECHO_T!$ECHO_T$ac_delim +LIBS!$LIBS$ac_delim +build_alias!$build_alias$ac_delim +host_alias!$host_alias$ac_delim +target_alias!$target_alias$ac_delim +build!$build$ac_delim +build_cpu!$build_cpu$ac_delim +build_vendor!$build_vendor$ac_delim +build_os!$build_os$ac_delim +host!$host$ac_delim +host_cpu!$host_cpu$ac_delim +host_vendor!$host_vendor$ac_delim +host_os!$host_os$ac_delim +CC!$CC$ac_delim +CFLAGS!$CFLAGS$ac_delim +LDFLAGS!$LDFLAGS$ac_delim +CPPFLAGS!$CPPFLAGS$ac_delim +ac_ct_CC!$ac_ct_CC$ac_delim +EXEEXT!$EXEEXT$ac_delim +OBJEXT!$OBJEXT$ac_delim +GREP!$GREP$ac_delim +EGREP!$EGREP$ac_delim +LN_S!$LN_S$ac_delim +ECHO!$ECHO$ac_delim +AR!$AR$ac_delim +RANLIB!$RANLIB$ac_delim +STRIP!$STRIP$ac_delim +CPP!$CPP$ac_delim +CXX!$CXX$ac_delim +CXXFLAGS!$CXXFLAGS$ac_delim +ac_ct_CXX!$ac_ct_CXX$ac_delim +CXXCPP!$CXXCPP$ac_delim +F77!$F77$ac_delim +FFLAGS!$FFLAGS$ac_delim +ac_ct_F77!$ac_ct_F77$ac_delim +LIBTOOL!$LIBTOOL$ac_delim +INSTALL_PROGRAM!$INSTALL_PROGRAM$ac_delim +INSTALL_SCRIPT!$INSTALL_SCRIPT$ac_delim +INSTALL_DATA!$INSTALL_DATA$ac_delim +AWK!$AWK$ac_delim 
+program_prefix!$program_prefix$ac_delim +VERSION!$VERSION$ac_delim +RELEASE!$RELEASE$ac_delim +VERSION_NUMBER!$VERSION_NUMBER$ac_delim +BUILD_CC!$BUILD_CC$ac_delim +BUILD_CFLAGS!$BUILD_CFLAGS$ac_delim +SQLITE_THREADSAFE!$SQLITE_THREADSAFE$ac_delim +TARGET_THREAD_LIB!$TARGET_THREAD_LIB$ac_delim +XTHREADCONNECT!$XTHREADCONNECT$ac_delim +THREADSOVERRIDELOCKS!$THREADSOVERRIDELOCKS$ac_delim +ALLOWRELEASE!$ALLOWRELEASE$ac_delim +TEMP_STORE!$TEMP_STORE$ac_delim +BUILD_EXEEXT!$BUILD_EXEEXT$ac_delim +OS_UNIX!$OS_UNIX$ac_delim +OS_WIN!$OS_WIN$ac_delim +OS_OS2!$OS_OS2$ac_delim +TARGET_EXEEXT!$TARGET_EXEEXT$ac_delim +TCL_VERSION!$TCL_VERSION$ac_delim +TCL_BIN_DIR!$TCL_BIN_DIR$ac_delim +TCL_SRC_DIR!$TCL_SRC_DIR$ac_delim +TCL_LIBS!$TCL_LIBS$ac_delim +TCL_INCLUDE_SPEC!$TCL_INCLUDE_SPEC$ac_delim +TCL_LIB_FILE!$TCL_LIB_FILE$ac_delim +TCL_LIB_FLAG!$TCL_LIB_FLAG$ac_delim +TCL_LIB_SPEC!$TCL_LIB_SPEC$ac_delim +_ACEOF + + if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 97; then + break + elif $ac_last_try; then + { { echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 +echo "$as_me: error: could not make $CONFIG_STATUS" >&2;} + { (exit 1); exit 1; }; } + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done + +ac_eof=`sed -n '/^CEOF[0-9]*$/s/CEOF/0/p' conf$$subs.sed` +if test -n "$ac_eof"; then + ac_eof=`echo "$ac_eof" | sort -nru | sed 1q` + ac_eof=`expr $ac_eof + 1` +fi + +cat >>$CONFIG_STATUS <<_ACEOF +cat >"\$tmp/subs-1.sed" <<\CEOF$ac_eof +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +_ACEOF +sed ' +s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g +s/^/s,@/; s/!/@,|#_!!_#|/ +:n +t n +s/'"$ac_delim"'$/,g/; t +s/$/\\/; p +N; s/^.*\n//; s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g; b n +' >>$CONFIG_STATUS >$CONFIG_STATUS <<_ACEOF +CEOF$ac_eof +_ACEOF + + +ac_delim='%!_!# ' +for ac_last_try in false false false false false :; do + cat >conf$$subs.sed <<_ACEOF +TCL_STUB_LIB_FILE!$TCL_STUB_LIB_FILE$ac_delim +TCL_STUB_LIB_FLAG!$TCL_STUB_LIB_FLAG$ac_delim +TCL_STUB_LIB_SPEC!$TCL_STUB_LIB_SPEC$ac_delim +HAVE_TCL!$HAVE_TCL$ac_delim +TARGET_READLINE_LIBS!$TARGET_READLINE_LIBS$ac_delim +TARGET_READLINE_INC!$TARGET_READLINE_INC$ac_delim +TARGET_HAVE_READLINE!$TARGET_HAVE_READLINE$ac_delim +TARGET_DEBUG!$TARGET_DEBUG$ac_delim +LIBOBJS!$LIBOBJS$ac_delim +LTLIBOBJS!$LTLIBOBJS$ac_delim +_ACEOF + + if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 10; then + break + elif $ac_last_try; then + { { echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 +echo "$as_me: error: could not make $CONFIG_STATUS" >&2;} + { (exit 1); exit 1; }; } + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done + +ac_eof=`sed -n '/^CEOF[0-9]*$/s/CEOF/0/p' conf$$subs.sed` +if test -n "$ac_eof"; then + ac_eof=`echo "$ac_eof" | sort -nru | sed 1q` + ac_eof=`expr $ac_eof + 1` +fi + +cat >>$CONFIG_STATUS <<_ACEOF +cat >"\$tmp/subs-2.sed" <<\CEOF$ac_eof +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b end +_ACEOF +sed ' +s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g +s/^/s,@/; s/!/@,|#_!!_#|/ +:n +t n +s/'"$ac_delim"'$/,g/; t +s/$/\\/; p +N; s/^.*\n//; s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g; b n +' >>$CONFIG_STATUS >$CONFIG_STATUS <<_ACEOF +:end +s/|#_!!_#|//g +CEOF$ac_eof +_ACEOF + + +# VPATH may cause trouble with some makes, so we remove $(srcdir), +# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and +# trailing colons and then remove the whole line if VPATH becomes empty +# (actually we leave an empty line to preserve line numbers). 
+if test "x$srcdir" = x.; then + ac_vpsub='/^[ ]*VPATH[ ]*=/{ +s/:*\$(srcdir):*/:/ +s/:*\${srcdir}:*/:/ +s/:*@srcdir@:*/:/ +s/^\([^=]*=[ ]*\):*/\1/ +s/:*$// +s/^[^=]*=[ ]*$// +}' +fi + +cat >>$CONFIG_STATUS <<\_ACEOF +fi # test -n "$CONFIG_FILES" + + +for ac_tag in :F $CONFIG_FILES +do + case $ac_tag in + :[FHLC]) ac_mode=$ac_tag; continue;; + esac + case $ac_mode$ac_tag in + :[FHL]*:*);; + :L* | :C*:*) { { echo "$as_me:$LINENO: error: Invalid tag $ac_tag." >&5 +echo "$as_me: error: Invalid tag $ac_tag." >&2;} + { (exit 1); exit 1; }; };; + :[FH]-) ac_tag=-:-;; + :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; + esac + ac_save_IFS=$IFS + IFS=: + set x $ac_tag + IFS=$ac_save_IFS + shift + ac_file=$1 + shift + + case $ac_mode in + :L) ac_source=$1;; + :[FH]) + ac_file_inputs= + for ac_f + do + case $ac_f in + -) ac_f="$tmp/stdin";; + *) # Look for the file first in the build tree, then in the source tree + # (if the path is not absolute). The absolute path cannot be DOS-style, + # because $ac_f cannot contain `:'. + test -f "$ac_f" || + case $ac_f in + [\\/$]*) false;; + *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; + esac || + { { echo "$as_me:$LINENO: error: cannot find input file: $ac_f" >&5 +echo "$as_me: error: cannot find input file: $ac_f" >&2;} + { (exit 1); exit 1; }; };; + esac + ac_file_inputs="$ac_file_inputs $ac_f" + done + + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + configure_input="Generated from "`IFS=: + echo $* | sed 's|^[^:]*/||;s|:[^:]*/|, |g'`" by configure." + if test x"$ac_file" != x-; then + configure_input="$ac_file. $configure_input" + { echo "$as_me:$LINENO: creating $ac_file" >&5 +echo "$as_me: creating $ac_file" >&6;} + fi + + case $ac_tag in + *:-:* | *:-) cat >"$tmp/stdin";; + esac + ;; + esac + + ac_dir=`$as_dirname -- "$ac_file" || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || +echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + { as_dir="$ac_dir" + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || { { echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5 +echo "$as_me: error: cannot create directory $as_dir" >&2;} + { (exit 1); exit 1; }; }; } + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'` + # A ".." for each directory in $ac_dir_suffix. 
+ ac_top_builddir_sub=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,/..,g;s,/,,'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + + case $ac_mode in + :F) + # + # CONFIG_FILE + # + + case $INSTALL in + [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; + *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; + esac +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF +# If the template does not know about datarootdir, expand it. +# FIXME: This hack should be removed a few years after 2.60. +ac_datarootdir_hack=; ac_datarootdir_seen= + +case `sed -n '/datarootdir/ { + p + q +} +/@datadir@/p +/@docdir@/p +/@infodir@/p +/@localedir@/p +/@mandir@/p +' $ac_file_inputs` in +*datarootdir*) ac_datarootdir_seen=yes;; +*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) + { echo "$as_me:$LINENO: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 +echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF + ac_datarootdir_hack=' + s&@datadir@&$datadir&g + s&@docdir@&$docdir&g + s&@infodir@&$infodir&g + s&@localedir@&$localedir&g + s&@mandir@&$mandir&g + s&\\\${datarootdir}&$datarootdir&g' ;; +esac +_ACEOF + +# Neutralize VPATH when `$srcdir' = `.'. +# Shell code in configure.ac might set extrasub. +# FIXME: do we really want to maintain this feature? +cat >>$CONFIG_STATUS <<_ACEOF + sed "$ac_vpsub +$extrasub +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s&@configure_input@&$configure_input&;t t +s&@top_builddir@&$ac_top_builddir_sub&;t t +s&@srcdir@&$ac_srcdir&;t t +s&@abs_srcdir@&$ac_abs_srcdir&;t t +s&@top_srcdir@&$ac_top_srcdir&;t t +s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t +s&@builddir@&$ac_builddir&;t t +s&@abs_builddir@&$ac_abs_builddir&;t t +s&@abs_top_builddir@&$ac_abs_top_builddir&;t t +s&@INSTALL@&$ac_INSTALL&;t t +$ac_datarootdir_hack +" $ac_file_inputs | sed -f "$tmp/subs-1.sed" | sed -f "$tmp/subs-2.sed" >$tmp/out + +test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && + { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } && + { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } && + { echo "$as_me:$LINENO: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined." >&5 +echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined." >&2;} + + rm -f "$tmp/stdin" + case $ac_file in + -) cat "$tmp/out"; rm -f "$tmp/out";; + *) rm -f "$ac_file"; mv "$tmp/out" $ac_file;; + esac + ;; + + + + esac + +done # for ac_tag + + +{ (exit 0); exit 0; } +_ACEOF +chmod +x $CONFIG_STATUS +ac_clean_files=$ac_clean_files_save + + +# configure is writing to config.log, and then calls config.status. 
+# config.status does its own redirection, appending to config.log. +# Unfortunately, on DOS this fails, as config.log is still kept open +# by configure, so config.status won't be able to write to it; its +# output is simply discarded. So we exec the FD to /dev/null, +# effectively closing config.log, so it can be properly (re)opened and +# appended to by config.status. When coming back to configure, we +# need to make the FD available again. +if test "$no_create" != yes; then + ac_cs_success=: + ac_config_status_args= + test "$silent" = yes && + ac_config_status_args="$ac_config_status_args --quiet" + exec 5>/dev/null + $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false + exec 5>>config.log + # Use ||, not &&, to avoid exiting from the if with $? = 1, which + # would make configure fail if this is the last instruction. + $ac_cs_success || { (exit 1); exit 1; } +fi diff --git a/libraries/sqlite/unix/sqlite-3.5.1/configure.ac b/libraries/sqlite/unix/sqlite-3.5.1/configure.ac new file mode 100644 index 0000000000..311459af0e --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/configure.ac @@ -0,0 +1,582 @@ +# +# The build process allows for using a cross-compiler. But the default +# action is to target the same platform that we are running on. The +# configure script needs to discover the following properties of the +# build and target systems: +# +# srcdir +# +# The is the name of the directory that contains the +# "configure" shell script. All source files are +# located relative to this directory. +# +# bindir +# +# The name of the directory where executables should be +# written by the "install" target of the makefile. +# +# program_prefix +# +# Add this prefix to the names of all executables that run +# on the target machine. Default: "" +# +# ENABLE_SHARED +# +# True if shared libraries should be generated. +# +# BUILD_CC +# +# The name of a command that is used to convert C +# source files into executables that run on the build +# platform. +# +# BUILD_CFLAGS +# +# Switches that the build compiler needs in order to construct +# command-line programs. +# +# BUILD_LIBS +# +# Libraries that the build compiler needs in order to construct +# command-line programs. +# +# BUILD_EXEEXT +# +# The filename extension for executables on the build +# platform. "" for Unix and ".exe" for Windows. +# +# TCL_* +# +# Lots of values are read in from the tclConfig.sh script, +# if that script is available. This values are used for +# constructing and installing the TCL extension. +# +# TARGET_READLINE_LIBS +# +# This is the library directives passed to the target linker +# that cause the executable to link against the readline library. +# This might be a switch like "-lreadline" or pathnames of library +# file like "../../src/libreadline.a". +# +# TARGET_READLINE_INC +# +# This variables define the directory that contain header +# files for the readline library. If the compiler is able +# to find on its own, then this can be blank. +# +# TARGET_EXEEXT +# +# The filename extension for executables on the +# target platform. "" for Unix and ".exe" for windows. +# +# The generated configure script will make an attempt to guess +# at all of the above parameters. You can override any of +# the guesses by setting the environment variable named +# "config_AAAA" where "AAAA" is the name of the parameter +# described above. (Exception: srcdir cannot be set this way.) 
+# If you have a file that sets one or more of these environment +# variables, you can invoke configure as follows: +# +# configure --with-hints=FILE +# +# where FILE is the name of the file that sets the environment +# variables. FILE should be an absolute pathname. +# +# This configure.in file is easy to reuse on other projects. Just +# change the argument to AC_INIT(). And disable any features that +# you don't need (for example BLT) by erasing or commenting out +# the corresponding code. +# +AC_INIT(src/sqlite.h.in) + +dnl Put the RCS revision string after AC_INIT so that it will also +dnl show in in configure. +# The following RCS revision string applies to configure.in +# $Revision: 1.30 $ + +######### +# Programs needed +# +AC_PROG_LIBTOOL +AC_PROG_INSTALL +AC_PROG_AWK + +######### +# Set up an appropriate program prefix +# +if test "$program_prefix" = "NONE"; then + program_prefix="" +fi +AC_SUBST(program_prefix) + +VERSION=[`cat $srcdir/VERSION | sed 's/^\([0-9]*\.*[0-9]*\).*/\1/'`] +echo "Version set to $VERSION" +AC_SUBST(VERSION) +RELEASE=`cat $srcdir/VERSION` +echo "Release set to $RELEASE" +AC_SUBST(RELEASE) +VERSION_NUMBER=[`cat $srcdir/VERSION \ + | sed 's/[^0-9]/ /g' \ + | awk '{printf "%d%03d%03d",$1,$2,$3}'`] +echo "Version number set to $VERSION_NUMBER" +AC_SUBST(VERSION_NUMBER) + +######### +# Check to see if the --with-hints=FILE option is used. If there is none, +# then check for a files named "$host.hints" and ../$hosts.hints where +# $host is the hostname of the build system. If still no hints are +# found, try looking in $system.hints and ../$system.hints where +# $system is the result of uname -s. +# +AC_ARG_WITH(hints, + AC_HELP_STRING([--with-hints=FILE],[Read configuration options from FILE]), + hints=$withval) +if test "$hints" = ""; then + host=`hostname | sed 's/\..*//'` + if test -r $host.hints; then + hints=$host.hints + else + if test -r ../$host.hints; then + hints=../$host.hints + fi + fi +fi +if test "$hints" = ""; then + sys=`uname -s` + if test -r $sys.hints; then + hints=$sys.hints + else + if test -r ../$sys.hints; then + hints=../$sys.hints + fi + fi +fi +if test "$hints" != ""; then + AC_MSG_RESULT(reading hints from $hints) + . $hints +fi + +######### +# Locate a compiler for the build machine. This compiler should +# generate command-line programs that run on the build machine. +# +if test x"$cross_compiling" = xno; then + BUILD_CC=$CC + BUILD_CFLAGS=$CFLAGS +else + if test "${BUILD_CC+set}" != set; then + AC_CHECK_PROGS(BUILD_CC, gcc cc cl) + fi + if test "${BUILD_CFLAGS+set}" != set; then + BUILD_CFLAGS="-g" + fi +fi +AC_SUBST(BUILD_CC) +AC_SUBST(BUILD_CFLAGS) + +########## +# Do we want to support multithreaded use of sqlite +# +AC_ARG_ENABLE(threadsafe, +AC_HELP_STRING([--enable-threadsafe],[Support threadsafe operation]),,enable_threadsafe=yes) +AC_MSG_CHECKING([whether to support threadsafe operation]) +if test "$enable_threadsafe" = "no"; then + SQLITE_THREADSAFE=0 + AC_MSG_RESULT([no]) +else + SQLITE_THREADSAFE=1 + AC_MSG_RESULT([yes]) +fi +AC_SUBST(SQLITE_THREADSAFE) + +if test "$SQLITE_THREADSAFE" = "1"; then + LIBS="" + AC_CHECK_LIB(pthread, pthread_create) + TARGET_THREAD_LIB="$LIBS" + LIBS="" +else + TARGET_THREAD_LIB="" +fi +AC_SUBST(TARGET_THREAD_LIB) + +########## +# Do we want to allow a connection created in one thread to be used +# in another thread. This does not work on many Linux systems (ex: RedHat 9) +# due to bugs in the threading implementations. This is thus off by default. 
+# +AC_ARG_ENABLE(cross-thread-connections, +AC_HELP_STRING([--enable-cross-thread-connections],[Allow connection sharing across threads]),,enable_xthreadconnect=no) +AC_MSG_CHECKING([whether to allow connections to be shared across threads]) +if test "$enable_xthreadconnect" = "no"; then + XTHREADCONNECT='' + AC_MSG_RESULT([no]) +else + XTHREADCONNECT='-DSQLITE_ALLOW_XTHREAD_CONNECT=1' + AC_MSG_RESULT([yes]) +fi +AC_SUBST(XTHREADCONNECT) + +########## +# Do we want to set threadsOverrideEachOthersLocks variable to be 1 (true) by +# default. Normally, a test at runtime is performed to determine the +# appropriate value of this variable. Use this option only if you're sure that +# threads can safely override each others locks in all runtime situations. +# +AC_ARG_ENABLE(threads-override-locks, +AC_HELP_STRING([--enable-threads-override-locks],[Threads can override each others locks]),,enable_threads_override_locks=no) +AC_MSG_CHECKING([whether threads can override each others locks]) +if test "$enable_threads_override_locks" = "no"; then + THREADSOVERRIDELOCKS='-1' + AC_MSG_RESULT([no]) +else + THREADSOVERRIDELOCKS='1' + AC_MSG_RESULT([yes]) +fi +AC_SUBST(THREADSOVERRIDELOCKS) + +########## +# Do we want to support release +# +AC_ARG_ENABLE(releasemode, +AC_HELP_STRING([--enable-releasemode],[Support libtool link to release mode]),,enable_releasemode=no) +AC_MSG_CHECKING([whether to support shared library linked as release mode or not]) +if test "$enable_releasemode" = "no"; then + ALLOWRELEASE="" + AC_MSG_RESULT([no]) +else + ALLOWRELEASE="-release `cat VERSION`" + AC_MSG_RESULT([yes]) +fi +AC_SUBST(ALLOWRELEASE) + +########## +# Do we want temporary databases in memory +# +AC_ARG_ENABLE(tempstore, +AC_HELP_STRING([--enable-tempstore],[Use an in-ram database for temporary tables (never,no,yes,always)]),,enable_tempstore=no) +AC_MSG_CHECKING([whether to use an in-ram database for temporary tables]) +case "$enable_tempstore" in + never ) + TEMP_STORE=0 + AC_MSG_RESULT([never]) + ;; + no ) + TEMP_STORE=1 + AC_MSG_RESULT([no]) + ;; + always ) + TEMP_STORE=3 + AC_MSG_RESULT([always]) + ;; + yes ) + TEMP_STORE=3 + AC_MSG_RESULT([always]) + ;; + * ) + TEMP_STORE=1 + AC_MSG_RESULT([yes]) + ;; +esac + +AC_SUBST(TEMP_STORE) + +########### +# Lots of things are different if we are compiling for Windows using +# the CYGWIN environment. So check for that special case and handle +# things accordingly. +# +AC_MSG_CHECKING([if executables have the .exe suffix]) +if test "$config_BUILD_EXEEXT" = ".exe"; then + CYGWIN=yes + AC_MSG_RESULT(yes) +else + AC_MSG_RESULT(unknown) +fi +if test "$CYGWIN" != "yes"; then + AC_CYGWIN +fi +if test "$CYGWIN" = "yes"; then + BUILD_EXEEXT=.exe +else + BUILD_EXEEXT=$EXEEXT +fi +if test x"$cross_compiling" = xno; then + TARGET_EXEEXT=$BUILD_EXEEXT +else + TARGET_EXEEXT=$config_TARGET_EXEEXT +fi +if test "$TARGET_EXEEXT" = ".exe"; then + if test $OS2_SHELL ; then + OS_UNIX=0 + OS_WIN=0 + OS_OS2=1 + TARGET_CFLAGS="$TARGET_CFLAGS -DOS_OS2=1" + if test "$ac_compiler_gnu" == "yes" ; then + TARGET_CFLAGS="$TARGET_CFLAGS -Zomf -Zexe -Zmap" + BUILD_CFLAGS="$BUILD_CFLAGS -Zomf -Zexe" + fi + else + OS_UNIX=0 + OS_WIN=1 + OS_OS2=0 + tclsubdir=win + TARGET_CFLAGS="$TARGET_CFLAGS -DOS_WIN=1" + fi +else + OS_UNIX=1 + OS_WIN=0 + OS_OS2=0 + tclsubdir=unix + TARGET_CFLAGS="$TARGET_CFLAGS -DOS_UNIX=1" +fi + +AC_SUBST(BUILD_EXEEXT) +AC_SUBST(OS_UNIX) +AC_SUBST(OS_WIN) +AC_SUBST(OS_OS2) +AC_SUBST(TARGET_EXEEXT) + +########## +# Figure out all the parameters needed to compile against Tcl. 
+# +# This code is derived from the SC_PATH_TCLCONFIG and SC_LOAD_TCLCONFIG +# macros in the in the tcl.m4 file of the standard TCL distribution. +# Those macros could not be used directly since we have to make some +# minor changes to accomodate systems that do not have TCL installed. +# +AC_ARG_ENABLE(tcl, AC_HELP_STRING([--disable-tcl],[do not build TCL extension]), + [use_tcl=$enableval],[use_tcl=yes]) +if test "${use_tcl}" = "yes" ; then + AC_ARG_WITH(tcl, AC_HELP_STRING([--with-tcl=DIR],[directory containing tcl configuration (tclConfig.sh)]), with_tclconfig=${withval}) + AC_MSG_CHECKING([for Tcl configuration]) + AC_CACHE_VAL(ac_cv_c_tclconfig,[ + # First check to see if --with-tcl was specified. + if test x"${with_tclconfig}" != x ; then + if test -f "${with_tclconfig}/tclConfig.sh" ; then + ac_cv_c_tclconfig=`(cd ${with_tclconfig}; pwd)` + else + AC_MSG_ERROR([${with_tclconfig} directory doesn't contain tclConfig.sh]) + fi + fi + # then check for a private Tcl installation + if test x"${ac_cv_c_tclconfig}" = x ; then + for i in \ + ../tcl \ + `ls -dr ../tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ + `ls -dr ../tcl[[8-9]].[[0-9]] 2>/dev/null` \ + `ls -dr ../tcl[[8-9]].[[0-9]]* 2>/dev/null` \ + ../../tcl \ + `ls -dr ../../tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ + `ls -dr ../../tcl[[8-9]].[[0-9]] 2>/dev/null` \ + `ls -dr ../../tcl[[8-9]].[[0-9]]* 2>/dev/null` \ + ../../../tcl \ + `ls -dr ../../../tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ + `ls -dr ../../../tcl[[8-9]].[[0-9]] 2>/dev/null` \ + `ls -dr ../../../tcl[[8-9]].[[0-9]]* 2>/dev/null` + do + if test -f "$i/unix/tclConfig.sh" ; then + ac_cv_c_tclconfig=`(cd $i/unix; pwd)` + break + fi + done + fi + + # check in a few common install locations + if test x"${ac_cv_c_tclconfig}" = x ; then + for i in \ + `ls -d ${libdir} 2>/dev/null` \ + `ls -d /usr/local/lib 2>/dev/null` \ + `ls -d /usr/contrib/lib 2>/dev/null` \ + `ls -d /usr/lib 2>/dev/null` + do + if test -f "$i/tclConfig.sh" ; then + ac_cv_c_tclconfig=`(cd $i; pwd)` + break + fi + done + fi + + # check in a few other private locations + if test x"${ac_cv_c_tclconfig}" = x ; then + for i in \ + ${srcdir}/../tcl \ + `ls -dr ${srcdir}/../tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ + `ls -dr ${srcdir}/../tcl[[8-9]].[[0-9]] 2>/dev/null` \ + `ls -dr ${srcdir}/../tcl[[8-9]].[[0-9]]* 2>/dev/null` + do + if test -f "$i/unix/tclConfig.sh" ; then + ac_cv_c_tclconfig=`(cd $i/unix; pwd)` + break + fi + done + fi + ]) + + if test x"${ac_cv_c_tclconfig}" = x ; then + use_tcl=no + AC_MSG_WARN(Can't find Tcl configuration definitions) + AC_MSG_WARN(*** Without Tcl the regression tests cannot be executed ***) + AC_MSG_WARN(*** Consider using --with-tcl=... to define location of Tcl ***) + else + TCL_BIN_DIR=${ac_cv_c_tclconfig} + AC_MSG_RESULT(found $TCL_BIN_DIR/tclConfig.sh) + + AC_MSG_CHECKING([for existence of $TCL_BIN_DIR/tclConfig.sh]) + if test -f "$TCL_BIN_DIR/tclConfig.sh" ; then + AC_MSG_RESULT([loading]) + . $TCL_BIN_DIR/tclConfig.sh + else + AC_MSG_RESULT([file not found]) + fi + + # + # If the TCL_BIN_DIR is the build directory (not the install directory), + # then set the common variable name to the value of the build variables. + # For example, the variable TCL_LIB_SPEC will be set to the value + # of TCL_BUILD_LIB_SPEC. An extension should make use of TCL_LIB_SPEC + # instead of TCL_BUILD_LIB_SPEC since it will work with both an + # installed and uninstalled version of Tcl. 
+ # + + if test -f $TCL_BIN_DIR/Makefile ; then + TCL_LIB_SPEC=${TCL_BUILD_LIB_SPEC} + TCL_STUB_LIB_SPEC=${TCL_BUILD_STUB_LIB_SPEC} + TCL_STUB_LIB_PATH=${TCL_BUILD_STUB_LIB_PATH} + fi + + # + # eval is required to do the TCL_DBGX substitution + # + + eval "TCL_LIB_FILE=\"${TCL_LIB_FILE}\"" + eval "TCL_LIB_FLAG=\"${TCL_LIB_FLAG}\"" + eval "TCL_LIB_SPEC=\"${TCL_LIB_SPEC}\"" + + eval "TCL_STUB_LIB_FILE=\"${TCL_STUB_LIB_FILE}\"" + eval "TCL_STUB_LIB_FLAG=\"${TCL_STUB_LIB_FLAG}\"" + eval "TCL_STUB_LIB_SPEC=\"${TCL_STUB_LIB_SPEC}\"" + + AC_SUBST(TCL_VERSION) + AC_SUBST(TCL_BIN_DIR) + AC_SUBST(TCL_SRC_DIR) + AC_SUBST(TCL_LIBS) + AC_SUBST(TCL_INCLUDE_SPEC) + + AC_SUBST(TCL_LIB_FILE) + AC_SUBST(TCL_LIB_FLAG) + AC_SUBST(TCL_LIB_SPEC) + + AC_SUBST(TCL_STUB_LIB_FILE) + AC_SUBST(TCL_STUB_LIB_FLAG) + AC_SUBST(TCL_STUB_LIB_SPEC) + fi +fi +if test "${use_tcl}" = "no" ; then + HAVE_TCL="" +else + HAVE_TCL=1 +fi +AC_SUBST(HAVE_TCL) + +########## +# Figure out what C libraries are required to compile programs +# that use "readline()" library. +# +TARGET_READLINE_LIBS="" +TARGET_READLINE_INC="" +TARGET_HAVE_READLINE=0 +AC_ARG_ENABLE([readline], + [AC_HELP_STRING([--disable-readline],[disable readline support [default=detect]])], + [with_readline=$enableval], + [with_readline=auto]) + +if test x"$with_readline" != xno; then + found="yes" + + AC_ARG_WITH([readline-lib], + [AC_HELP_STRING([--with-readline-lib],[specify readline library])], + [with_readline_lib=$withval], + [with_readline_lib="auto"]) + if test "x$with_readline_lib" = xauto; then + save_LIBS="$LIBS" + LIBS="" + AC_SEARCH_LIBS(tgetent, [readline ncurses curses termcap], [term_LIBS="$LIBS"], [term_LIBS=""]) + AC_CHECK_LIB([readline], [readline], [TARGET_READLINE_LIBS="-lreadline"], [found="no"]) + TARGET_READLINE_LIBS="$TARGET_READLINE_LIBS $term_LIBS" + LIBS="$save_LIBS" + else + TARGET_READLINE_LIBS="$with_readline_lib" + fi + + AC_ARG_WITH([readline-inc], + [AC_HELP_STRING([--with-readline-inc],[specify readline include paths])], + [with_readline_inc=$withval], + [with_readline_inc="auto"]) + if test "x$with_readline_inc" = xauto; then + AC_CHECK_HEADER(readline.h, [found="yes"], [ + found="no" + if test "$cross_compiling" != yes; then + for dir in /usr /usr/local /usr/local/readline /usr/contrib /mingw; do + for subdir in include include/readline; do + AC_CHECK_FILE($dir/$subdir/readline.h, found=yes) + if test "$found" = "yes"; then + TARGET_READLINE_INC="-I$dir/$subdir" + break + fi + done + test "$found" = "yes" && break + done + fi + ]) + else + TARGET_READLINE_INC="$with_readline_inc" + fi + + if test x"$found" = xno; then + TARGET_READLINE_LIBS="" + TARGET_READLINE_INC="" + TARGET_HAVE_READLINE=0 + else + TARGET_HAVE_READLINE=1 + fi +fi + +AC_SUBST(TARGET_READLINE_LIBS) +AC_SUBST(TARGET_READLINE_INC) +AC_SUBST(TARGET_HAVE_READLINE) + +########## +# Figure out what C libraries are required to compile programs +# that use "fdatasync()" function. +# +AC_SEARCH_LIBS(fdatasync, [rt]) + +######### +# check for debug enabled +AC_ARG_ENABLE(debug, AC_HELP_STRING([--enable-debug],[enable debugging & verbose explain]), + [use_debug=$enableval],[use_debug=no]) +if test "${use_debug}" = "yes" ; then + TARGET_DEBUG="-DSQLITE_DEBUG=1" +else + TARGET_DEBUG="-DNDEBUG" +fi +AC_SUBST(TARGET_DEBUG) + +######### +# Figure out whether or not we have a "usleep()" function. 
+# +AC_CHECK_FUNC(usleep, [TARGET_CFLAGS="$TARGET_CFLAGS -DHAVE_USLEEP=1"]) + +#-------------------------------------------------------------------- +# Redefine fdatasync as fsync on systems that lack fdatasync +#-------------------------------------------------------------------- + +AC_CHECK_FUNC(fdatasync, [TARGET_CFLAGS="$TARGET_CFLAGS -DHAVE_FDATASYNC=1"]) + +######### +# Generate the output files. +# +AC_OUTPUT([ +Makefile +sqlite3.pc +]) diff --git a/libraries/sqlite/unix/sqlite-3.5.1/contrib/sqlitecon.tcl b/libraries/sqlite/unix/sqlite-3.5.1/contrib/sqlitecon.tcl new file mode 100644 index 0000000000..b5dbcafc2a --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/contrib/sqlitecon.tcl @@ -0,0 +1,679 @@ +# A Tk console widget for SQLite. Invoke sqlitecon::create with a window name, +# a prompt string, a title to set a new top-level window, and the SQLite +# database handle. For example: +# +# sqlitecon::create .sqlcon {sql:- } {SQL Console} db +# +# A toplevel window is created that allows you to type in SQL commands to +# be processed on the spot. +# +# A limited set of dot-commands are supported: +# +# .table +# .schema ?TABLE? +# .mode list|column|multicolumn|line +# .exit +# +# In addition, a new SQL function named "edit()" is created. This function +# takes a single text argument and returns a text result. Whenever the +# the function is called, it pops up a new toplevel window containing a +# text editor screen initialized to the argument. When the "OK" button +# is pressed, whatever revised text is in the text editor is returned as +# the result of the edit() function. This allows text fields of SQL tables +# to be edited quickly and easily as follows: +# +# UPDATE table1 SET dscr = edit(dscr) WHERE rowid=15; +# + + +# Create a namespace to work in +# +namespace eval ::sqlitecon { + # do nothing +} + +# Create a console widget named $w. The prompt string is $prompt. +# The title at the top of the window is $title. The database connection +# object is $db +# +proc sqlitecon::create {w prompt title db} { + upvar #0 $w.t v + if {[winfo exists $w]} {destroy $w} + if {[info exists v]} {unset v} + toplevel $w + wm title $w $title + wm iconname $w $title + frame $w.mb -bd 2 -relief raised + pack $w.mb -side top -fill x + menubutton $w.mb.file -text File -menu $w.mb.file.m + menubutton $w.mb.edit -text Edit -menu $w.mb.edit.m + pack $w.mb.file $w.mb.edit -side left -padx 8 -pady 1 + set m [menu $w.mb.file.m -tearoff 0] + $m add command -label {Close} -command "destroy $w" + sqlitecon::create_child $w $prompt $w.mb.edit.m + set v(db) $db + $db function edit ::sqlitecon::_edit +} + +# This routine creates a console as a child window within a larger +# window. It also creates an edit menu named "$editmenu" if $editmenu!="". +# The calling function is responsible for posting the edit menu. 
+# +proc sqlitecon::create_child {w prompt editmenu} { + upvar #0 $w.t v + if {$editmenu!=""} { + set m [menu $editmenu -tearoff 0] + $m add command -label Cut -command "sqlitecon::Cut $w.t" + $m add command -label Copy -command "sqlitecon::Copy $w.t" + $m add command -label Paste -command "sqlitecon::Paste $w.t" + $m add command -label {Clear Screen} -command "sqlitecon::Clear $w.t" + $m add separator + $m add command -label {Save As...} -command "sqlitecon::SaveFile $w.t" + catch {$editmenu config -postcommand "sqlitecon::EnableEditMenu $w"} + } + scrollbar $w.sb -orient vertical -command "$w.t yview" + pack $w.sb -side right -fill y + text $w.t -font fixed -yscrollcommand "$w.sb set" + pack $w.t -side right -fill both -expand 1 + bindtags $w.t Sqlitecon + set v(editmenu) $editmenu + set v(history) 0 + set v(historycnt) 0 + set v(current) -1 + set v(prompt) $prompt + set v(prior) {} + set v(plength) [string length $v(prompt)] + set v(x) 0 + set v(y) 0 + set v(mode) column + set v(header) on + $w.t mark set insert end + $w.t tag config ok -foreground blue + $w.t tag config err -foreground red + $w.t insert end $v(prompt) + $w.t mark set out 1.0 + after idle "focus $w.t" +} + +bind Sqlitecon <1> {sqlitecon::Button1 %W %x %y} +bind Sqlitecon {sqlitecon::B1Motion %W %x %y} +bind Sqlitecon {sqlitecon::B1Leave %W %x %y} +bind Sqlitecon {sqlitecon::cancelMotor %W} +bind Sqlitecon {sqlitecon::cancelMotor %W} +bind Sqlitecon {sqlitecon::Insert %W %A} +bind Sqlitecon {sqlitecon::Left %W} +bind Sqlitecon {sqlitecon::Left %W} +bind Sqlitecon {sqlitecon::Right %W} +bind Sqlitecon {sqlitecon::Right %W} +bind Sqlitecon {sqlitecon::Backspace %W} +bind Sqlitecon {sqlitecon::Backspace %W} +bind Sqlitecon {sqlitecon::Delete %W} +bind Sqlitecon {sqlitecon::Delete %W} +bind Sqlitecon {sqlitecon::Home %W} +bind Sqlitecon {sqlitecon::Home %W} +bind Sqlitecon {sqlitecon::End %W} +bind Sqlitecon {sqlitecon::End %W} +bind Sqlitecon {sqlitecon::Enter %W} +bind Sqlitecon {sqlitecon::Enter %W} +bind Sqlitecon {sqlitecon::Prior %W} +bind Sqlitecon {sqlitecon::Prior %W} +bind Sqlitecon {sqlitecon::Next %W} +bind Sqlitecon {sqlitecon::Next %W} +bind Sqlitecon {sqlitecon::EraseEOL %W} +bind Sqlitecon <> {sqlitecon::Cut %W} +bind Sqlitecon <> {sqlitecon::Copy %W} +bind Sqlitecon <> {sqlitecon::Paste %W} +bind Sqlitecon <> {sqlitecon::Clear %W} + +# Insert a single character at the insertion cursor +# +proc sqlitecon::Insert {w a} { + $w insert insert $a + $w yview insert +} + +# Move the cursor one character to the left +# +proc sqlitecon::Left {w} { + upvar #0 $w v + scan [$w index insert] %d.%d row col + if {$col>$v(plength)} { + $w mark set insert "insert -1c" + } +} + +# Erase the character to the left of the cursor +# +proc sqlitecon::Backspace {w} { + upvar #0 $w v + scan [$w index insert] %d.%d row col + if {$col>$v(plength)} { + $w delete {insert -1c} + } +} + +# Erase to the end of the line +# +proc sqlitecon::EraseEOL {w} { + upvar #0 $w v + scan [$w index insert] %d.%d row col + if {$col>=$v(plength)} { + $w delete insert {insert lineend} + } +} + +# Move the cursor one character to the right +# +proc sqlitecon::Right {w} { + $w mark set insert "insert +1c" +} + +# Erase the character to the right of the cursor +# +proc sqlitecon::Delete w { + $w delete insert +} + +# Move the cursor to the beginning of the current line +# +proc sqlitecon::Home w { + upvar #0 $w v + scan [$w index insert] %d.%d row col + $w mark set insert $row.$v(plength) +} + +# Move the cursor to the end of the current line +# +proc 
sqlitecon::End w { + $w mark set insert {insert lineend} +} + +# Add a line to the history +# +proc sqlitecon::addHistory {w line} { + upvar #0 $w v + if {$v(historycnt)>0} { + set last [lindex $v(history) [expr $v(historycnt)-1]] + if {[string compare $last $line]} { + lappend v(history) $line + incr v(historycnt) + } + } else { + set v(history) [list $line] + set v(historycnt) 1 + } + set v(current) $v(historycnt) +} + +# Called when "Enter" is pressed. Do something with the line +# of text that was entered. +# +proc sqlitecon::Enter w { + upvar #0 $w v + scan [$w index insert] %d.%d row col + set start $row.$v(plength) + set line [$w get $start "$start lineend"] + $w insert end \n + $w mark set out end + if {$v(prior)==""} { + set cmd $line + } else { + set cmd $v(prior)\n$line + } + if {[string index $cmd 0]=="." || [$v(db) complete $cmd]} { + regsub -all {\n} [string trim $cmd] { } cmd2 + addHistory $w $cmd2 + set rc [catch {DoCommand $w $cmd} res] + if {![winfo exists $w]} return + if {$rc} { + $w insert end $res\n err + } elseif {[string length $res]>0} { + $w insert end $res\n ok + } + set v(prior) {} + $w insert end $v(prompt) + } else { + set v(prior) $cmd + regsub -all {[^ ]} $v(prompt) . x + $w insert end $x + } + $w mark set insert end + $w mark set out {insert linestart} + $w yview insert +} + +# Execute a single SQL command. Pay special attention to control +# directives that begin with "." +# +# The return value is the text output from the command, properly +# formatted. +# +proc sqlitecon::DoCommand {w cmd} { + upvar #0 $w v + set mode $v(mode) + set header $v(header) + if {[regexp {^(\.[a-z]+)} $cmd all word]} { + if {$word==".mode"} { + regexp {^.[a-z]+ +([a-z]+)} $cmd all v(mode) + return {} + } elseif {$word==".exit"} { + destroy [winfo toplevel $w] + return {} + } elseif {$word==".header"} { + regexp {^.[a-z]+ +([a-z]+)} $cmd all v(header) + return {} + } elseif {$word==".tables"} { + set mode multicolumn + set cmd {SELECT name FROM sqlite_master WHERE type='table' + UNION ALL + SELECT name FROM sqlite_temp_master WHERE type='table'} + $v(db) eval {PRAGMA database_list} { + if {$name!="temp" && $name!="main"} { + append cmd "UNION ALL SELECT name FROM $name.sqlite_master\ + WHERE type='table'" + } + } + append cmd { ORDER BY 1} + } elseif {$word==".fullschema"} { + set pattern % + regexp {^.[a-z]+ +([^ ]+)} $cmd all pattern + set mode list + set header 0 + set cmd "SELECT sql FROM sqlite_master WHERE tbl_name LIKE '$pattern' + AND sql NOT NULL UNION ALL SELECT sql FROM sqlite_temp_master + WHERE tbl_name LIKE '$pattern' AND sql NOT NULL" + $v(db) eval {PRAGMA database_list} { + if {$name!="temp" && $name!="main"} { + append cmd " UNION ALL SELECT sql FROM $name.sqlite_master\ + WHERE tbl_name LIKE '$pattern' AND sql NOT NULL" + } + } + } elseif {$word==".schema"} { + set pattern % + regexp {^.[a-z]+ +([^ ]+)} $cmd all pattern + set mode list + set header 0 + set cmd "SELECT sql FROM sqlite_master WHERE name LIKE '$pattern' + AND sql NOT NULL UNION ALL SELECT sql FROM sqlite_temp_master + WHERE name LIKE '$pattern' AND sql NOT NULL" + $v(db) eval {PRAGMA database_list} { + if {$name!="temp" && $name!="main"} { + append cmd " UNION ALL SELECT sql FROM $name.sqlite_master\ + WHERE name LIKE '$pattern' AND sql NOT NULL" + } + } + } else { + return \ + ".exit\n.mode line|list|column\n.schema ?TABLENAME?\n.tables" + } + } + set res {} + if {$mode=="list"} { + $v(db) eval $cmd x { + set sep {} + foreach col $x(*) { + append res $sep$x($col) + set sep | + } + append res \n + 
} + if {[info exists x(*)] && $header} { + set sep {} + set hdr {} + foreach col $x(*) { + append hdr $sep$col + set sep | + } + set res $hdr\n$res + } + } elseif {[string range $mode 0 2]=="col"} { + set y {} + $v(db) eval $cmd x { + foreach col $x(*) { + if {![info exists cw($col)] || $cw($col)<[string length $x($col)]} { + set cw($col) [string length $x($col)] + } + lappend y $x($col) + } + } + if {[info exists x(*)] && $header} { + set hdr {} + set ln {} + set dash --------------------------------------------------------------- + append dash ------------------------------------------------------------ + foreach col $x(*) { + if {![info exists cw($col)] || $cw($col)<[string length $col]} { + set cw($col) [string length $col] + } + lappend hdr $col + lappend ln [string range $dash 1 $cw($col)] + } + set y [concat $hdr $ln $y] + } + if {[info exists x(*)]} { + set format {} + set arglist {} + set arglist2 {} + set i 0 + foreach col $x(*) { + lappend arglist x$i + append arglist2 " \$x$i" + incr i + append format " %-$cw($col)s" + } + set format [string trimleft $format]\n + if {[llength $arglist]>0} { + foreach $arglist $y "append res \[format [list $format] $arglist2\]" + } + } + } elseif {$mode=="multicolumn"} { + set y [$v(db) eval $cmd] + set max 0 + foreach e $y { + if {$max<[string length $e]} {set max [string length $e]} + } + set ncol [expr {int(80/($max+2))}] + if {$ncol<1} {set ncol 1} + set nelem [llength $y] + set nrow [expr {($nelem+$ncol-1)/$ncol}] + set format "%-${max}s" + for {set i 0} {$i<$nrow} {incr i} { + set j $i + while 1 { + append res [format $format [lindex $y $j]] + incr j $nrow + if {$j>=$nelem} break + append res { } + } + append res \n + } + } else { + $v(db) eval $cmd x { + foreach col $x(*) {append res "$col = $x($col)\n"} + append res \n + } + } + return [string trimright $res] +} + +# Change the line to the previous line +# +proc sqlitecon::Prior w { + upvar #0 $w v + if {$v(current)<=0} return + incr v(current) -1 + set line [lindex $v(history) $v(current)] + sqlitecon::SetLine $w $line +} + +# Change the line to the next line +# +proc sqlitecon::Next w { + upvar #0 $w v + if {$v(current)>=$v(historycnt)} return + incr v(current) 1 + set line [lindex $v(history) $v(current)] + sqlitecon::SetLine $w $line +} + +# Change the contents of the entry line +# +proc sqlitecon::SetLine {w line} { + upvar #0 $w v + scan [$w index insert] %d.%d row col + set start $row.$v(plength) + $w delete $start end + $w insert end $line + $w mark set insert end + $w yview insert +} + +# Called when the mouse button is pressed at position $x,$y on +# the console widget. 
+# +proc sqlitecon::Button1 {w x y} { + global tkPriv + upvar #0 $w v + set v(mouseMoved) 0 + set v(pressX) $x + set p [sqlitecon::nearestBoundry $w $x $y] + scan [$w index insert] %d.%d ix iy + scan $p %d.%d px py + if {$px==$ix} { + $w mark set insert $p + } + $w mark set anchor $p + focus $w +} + +# Find the boundry between characters that is nearest +# to $x,$y +# +proc sqlitecon::nearestBoundry {w x y} { + set p [$w index @$x,$y] + set bb [$w bbox $p] + if {![string compare $bb ""]} {return $p} + if {($x-[lindex $bb 0])<([lindex $bb 2]/2)} {return $p} + $w index "$p + 1 char" +} + +# This routine extends the selection to the point specified by $x,$y +# +proc sqlitecon::SelectTo {w x y} { + upvar #0 $w v + set cur [sqlitecon::nearestBoundry $w $x $y] + if {[catch {$w index anchor}]} { + $w mark set anchor $cur + } + set anchor [$w index anchor] + if {[$w compare $cur != $anchor] || (abs($v(pressX) - $x) >= 3)} { + if {$v(mouseMoved)==0} { + $w tag remove sel 0.0 end + } + set v(mouseMoved) 1 + } + if {[$w compare $cur < anchor]} { + set first $cur + set last anchor + } else { + set first anchor + set last $cur + } + if {$v(mouseMoved)} { + $w tag remove sel 0.0 $first + $w tag add sel $first $last + $w tag remove sel $last end + update idletasks + } +} + +# Called whenever the mouse moves while button-1 is held down. +# +proc sqlitecon::B1Motion {w x y} { + upvar #0 $w v + set v(y) $y + set v(x) $x + sqlitecon::SelectTo $w $x $y +} + +# Called whenever the mouse leaves the boundries of the widget +# while button 1 is held down. +# +proc sqlitecon::B1Leave {w x y} { + upvar #0 $w v + set v(y) $y + set v(x) $x + sqlitecon::motor $w +} + +# This routine is called to automatically scroll the window when +# the mouse drags offscreen. +# +proc sqlitecon::motor w { + upvar #0 $w v + if {![winfo exists $w]} return + if {$v(y)>=[winfo height $w]} { + $w yview scroll 1 units + } elseif {$v(y)<0} { + $w yview scroll -1 units + } else { + return + } + sqlitecon::SelectTo $w $v(x) $v(y) + set v(timer) [after 50 sqlitecon::motor $w] +} + +# This routine cancels the scrolling motor if it is active +# +proc sqlitecon::cancelMotor w { + upvar #0 $w v + catch {after cancel $v(timer)} + catch {unset v(timer)} +} + +# Do a Copy operation on the stuff currently selected. +# +proc sqlitecon::Copy w { + if {![catch {set text [$w get sel.first sel.last]}]} { + clipboard clear -displayof $w + clipboard append -displayof $w $text + } +} + +# Return 1 if the selection exists and is contained +# entirely on the input line. Return 2 if the selection +# exists but is not entirely on the input line. Return 0 +# if the selection does not exist. +# +proc sqlitecon::canCut w { + set r [catch { + scan [$w index sel.first] %d.%d s1x s1y + scan [$w index sel.last] %d.%d s2x s2y + scan [$w index insert] %d.%d ix iy + }] + if {$r==1} {return 0} + if {$s1x==$ix && $s2x==$ix} {return 1} + return 2 +} + +# Do a Cut operation if possible. Cuts are only allowed +# if the current selection is entirely contained on the +# current input line. +# +proc sqlitecon::Cut w { + if {[sqlitecon::canCut $w]==1} { + sqlitecon::Copy $w + $w delete sel.first sel.last + } +} + +# Do a paste opeation. 
+# +proc sqlitecon::Paste w { + if {[sqlitecon::canCut $w]==1} { + $w delete sel.first sel.last + } + if {[catch {selection get -displayof $w -selection CLIPBOARD} topaste] + && [catch {selection get -displayof $w -selection PRIMARY} topaste]} { + return + } + if {[info exists ::$w]} { + set prior 0 + foreach line [split $topaste \n] { + if {$prior} { + sqlitecon::Enter $w + update + } + set prior 1 + $w insert insert $line + } + } else { + $w insert insert $topaste + } +} + +# Enable or disable entries in the Edit menu +# +proc sqlitecon::EnableEditMenu w { + upvar #0 $w.t v + set m $v(editmenu) + if {$m=="" || ![winfo exists $m]} return + switch [sqlitecon::canCut $w.t] { + 0 { + $m entryconf Copy -state disabled + $m entryconf Cut -state disabled + } + 1 { + $m entryconf Copy -state normal + $m entryconf Cut -state normal + } + 2 { + $m entryconf Copy -state normal + $m entryconf Cut -state disabled + } + } +} + +# Prompt the user for the name of a writable file. Then write the +# entire contents of the console screen to that file. +# +proc sqlitecon::SaveFile w { + set types { + {{Text Files} {.txt}} + {{All Files} *} + } + set f [tk_getSaveFile -filetypes $types -title "Write Screen To..."] + if {$f!=""} { + if {[catch {open $f w} fd]} { + tk_messageBox -type ok -icon error -message $fd + } else { + puts $fd [string trimright [$w get 1.0 end] \n] + close $fd + } + } +} + +# Erase everything from the console above the insertion line. +# +proc sqlitecon::Clear w { + $w delete 1.0 {insert linestart} +} + +# An in-line editor for SQL +# +proc sqlitecon::_edit {origtxt {title {}}} { + for {set i 0} {[winfo exists .ed$i]} {incr i} continue + set w .ed$i + toplevel $w + wm protocol $w WM_DELETE_WINDOW "$w.b.can invoke" + wm title $w {Inline SQL Editor} + frame $w.b + pack $w.b -side bottom -fill x + button $w.b.can -text Cancel -width 6 -command [list set ::$w 0] + button $w.b.ok -text OK -width 6 -command [list set ::$w 1] + button $w.b.cut -text Cut -width 6 -command [list ::sqlitecon::Cut $w.t] + button $w.b.copy -text Copy -width 6 -command [list ::sqlitecon::Copy $w.t] + button $w.b.paste -text Paste -width 6 -command [list ::sqlitecon::Paste $w.t] + set ::$w {} + pack $w.b.cut $w.b.copy $w.b.paste $w.b.can $w.b.ok\ + -side left -padx 5 -pady 5 -expand 1 + if {$title!=""} { + label $w.title -text $title + pack $w.title -side top -padx 5 -pady 5 + } + text $w.t -bg white -fg black -yscrollcommand [list $w.sb set] + pack $w.t -side left -fill both -expand 1 + scrollbar $w.sb -orient vertical -command [list $w.t yview] + pack $w.sb -side left -fill y + $w.t insert end $origtxt + + vwait ::$w + + if {[set ::$w]} { + set txt [string trimright [$w.t get 1.0 end]] + } else { + set txt $origtxt + } + destroy $w + return $txt +} diff --git a/libraries/sqlite/unix/sqlite-3.5.1/doc/lemon.html b/libraries/sqlite/unix/sqlite-3.5.1/doc/lemon.html new file mode 100644 index 0000000000..6a4d6dbd2c --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/doc/lemon.html @@ -0,0 +1,892 @@ + + +The Lemon Parser Generator + + +

The Lemon Parser Generator

+ +

Lemon is an LALR(1) parser generator for C or C++.
+It does the same job as ``bison'' and ``yacc''.
+But lemon is not another bison or yacc clone. It
+uses a different grammar syntax which is designed to
+reduce the number of coding errors. Lemon also uses a more
+sophisticated parsing engine that is faster than yacc and
+bison and which is both reentrant and thread-safe.
+Furthermore, Lemon implements features that can be used
+to eliminate resource leaks, making it suitable for use
+in long-running programs such as graphical user interfaces
+or embedded controllers.

+ +

This document is an introduction to the Lemon +parser generator.

+ +

Theory of Operation

+ +

The main goal of Lemon is to translate a context free grammar (CFG) +for a particular language into C code that implements a parser for +that language. +The program has two inputs: +

    +
  • The grammar specification. +
  • A parser template file. +
+Typically, only the grammar specification is supplied by the programmer. +Lemon comes with a default parser template which works fine for most +applications. But the user is free to substitute a different parser +template if desired.

+ +

Depending on command-line options, Lemon will generate between
+one and three output files.

    +
  • C code to implement the parser. +
  • A header file defining an integer ID for each terminal symbol. +
  • An information file that describes the states of the generated parser + automaton. +
+By default, all three of these output files are generated. +The header file is suppressed if the ``-m'' command-line option is +used and the report file is omitted when ``-q'' is selected.

+ +

The grammar specification file uses a ``.y'' suffix, by convention. +In the examples used in this document, we'll assume the name of the +grammar file is ``gram.y''. A typical use of Lemon would be the +following command: +

+   lemon gram.y
+
+This command will generate three output files named ``gram.c'', +``gram.h'' and ``gram.out''. +The first is C code to implement the parser. The second +is the header file that defines numerical values for all +terminal symbols, and the last is the report that explains +the states used by the parser automaton.
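+
+As a rough illustration, the generated header file is nothing more than a
+list of #define statements, one per terminal symbol. The names and values
+below are hypothetical; the real ones are derived from the grammar in
+``gram.y'':
+
+   /* gram.h -- sketch only; actual names and values depend on the grammar */
+   #define TOK_PLUS     1
+   #define TOK_MINUS    2
+   #define TOK_TIMES    3
+   #define TOK_INTEGER  4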

+ +

Command Line Options

+ +

The behavior of Lemon can be modified using command-line options. +You can obtain a list of the available command-line options together +with a brief explanation of what each does by typing +

+   lemon -?
+
+As of this writing, the following command-line options are supported: +
    +
  • -b +
  • -c +
  • -g +
  • -m +
  • -q +
  • -s +
  • -x +
+The ``-b'' option reduces the amount of text in the report file by
+printing only the basis of each parser state, rather than the full
+configuration.
+The ``-c'' option suppresses action table compression. Using -c
+will make the parser a little larger and slower but it will detect
+syntax errors sooner.
+The ``-g'' option causes no output files to be generated at all.
+Instead, the input grammar file is printed on standard output but
+with all comments, actions and other extraneous text deleted. This
+is a useful way to get a quick summary of a grammar.
+The ``-m'' option causes the output C source file to be compatible
+with the ``makeheaders'' program.
+Makeheaders is a program that automatically generates header files
+from C source code. When the ``-m'' option is used, the header
+file is not output since the makeheaders program will take care
+of generating all header files automatically.
+The ``-q'' option suppresses the report file.
+Using ``-s'' causes a brief summary of parser statistics to be
+printed. Like this:
+   Parser statistics: 74 terminals, 70 nonterminals, 179 rules
+                      340 states, 2026 parser table entries, 0 conflicts
+
+Finally, the ``-x'' option causes Lemon to print its version number +and then stop without attempting to read the grammar or generate a parser.

+ +

The Parser Interface

+ +

Lemon doesn't generate a complete, working program. It only generates +a few subroutines that implement a parser. This section describes +the interface to those subroutines. It is up to the programmer to +call these subroutines in an appropriate way in order to produce a +complete system.

+ +

Before a program begins using a Lemon-generated parser, the program +must first create the parser. +A new parser is created as follows: +

+   void *pParser = ParseAlloc( malloc );
+
+The ParseAlloc() routine allocates and initializes a new parser and +returns a pointer to it. +The actual data structure used to represent a parser is opaque -- +its internal structure is not visible or usable by the calling routine. +For this reason, the ParseAlloc() routine returns a pointer to void +rather than a pointer to some particular structure. +The sole argument to the ParseAlloc() routine is a pointer to the +subroutine used to allocate memory. Typically this means ``malloc()''.

+ +

After a program is finished using a parser, it can reclaim all +memory allocated by that parser by calling +

+   ParseFree(pParser, free);
+
+The first argument is the same pointer returned by ParseAlloc(). The +second argument is a pointer to the function used to release bulk +memory back to the system.

+ +

After a parser has been allocated using ParseAlloc(), the programmer +must supply the parser with a sequence of tokens (terminal symbols) to +be parsed. This is accomplished by calling the following function +once for each token: +

+   Parse(pParser, hTokenID, sTokenData, pArg);
+
+The first argument to the Parse() routine is the pointer returned by +ParseAlloc(). +The second argument is a small positive integer that tells the parser the +type of the next token in the data stream. +There is one token type for each terminal symbol in the grammar. +The gram.h file generated by Lemon contains #define statements that +map symbolic terminal symbol names into appropriate integer values. +(A value of 0 for the second argument is a special flag to the +parser to indicate that the end of input has been reached.) +The third argument is the value of the given token. By default, +the type of the third argument is an integer, but the grammar will +usually redefine this type to be some kind of structure. +Typically the second argument will be a broad category of tokens +such as ``identifier'' or ``number'' and the third argument will +be the name of the identifier or the value of the number.

+ +

The Parse() function may have either three or four arguments, +depending on the grammar. If the grammar specification file requests +it, the Parse() function will have a fourth parameter that can be +of any type chosen by the programmer. The parser doesn't do anything +with this argument except to pass it through to action routines. +This is a convenient mechanism for passing state information down +to the action routines without having to use global variables.

+ +

A typical use of a Lemon parser might look something like the +following: +

+   01 ParseTree *ParseFile(const char *zFilename){
+   02    Tokenizer *pTokenizer;
+   03    void *pParser;
+   04    Token sToken;
+   05    int hTokenId;
+   06    ParserState sState;
+   07
+   08    pTokenizer = TokenizerCreate(zFilename);
+   09    pParser = ParseAlloc( malloc );
+   10    InitParserState(&sState);
+   11    while( GetNextToken(pTokenizer, &hTokenId, &sToken) ){
+   12       Parse(pParser, hTokenId, sToken, &sState);
+   13    }
+   14    Parse(pParser, 0, sToken, &sState);
+   15    ParseFree(pParser, free );
+   16    TokenizerFree(pTokenizer);
+   17    return sState.treeRoot;
+   18 }
+
+This example shows a user-written routine that parses a file of +text and returns a pointer to the parse tree. +(We've omitted all error-handling from this example to keep it +simple.) +We assume the existence of some kind of tokenizer which is created +using TokenizerCreate() on line 8 and deleted by TokenizerFree() +on line 16. The GetNextToken() function on line 11 retrieves the +next token from the input file and puts its type in the +integer variable hTokenId. The sToken variable is assumed to be +some kind of structure that contains details about each token, +such as its complete text, what line it occurs on, etc.

+ +

This example also assumes the existence of a structure of type +ParserState that holds state information about a particular parse. +An instance of such a structure is created on line 6 and initialized +on line 10. A pointer to this structure is passed into the Parse() +routine as the optional 4th argument. +The action routine specified by the grammar for the parser can use +the ParserState structure to hold whatever information is useful and +appropriate. In the example, we note that the treeRoot field of +the ParserState structure is left pointing to the root of the parse +tree.

+ +

The core of this example as it relates to Lemon is as follows: +

+   ParseFile(){
+      pParser = ParseAlloc( malloc );
+      while( GetNextToken(pTokenizer,&hTokenId, &sToken) ){
+         Parse(pParser, hTokenId, sToken);
+      }
+      Parse(pParser, 0, sToken);
+      ParseFree(pParser, free );
+   }
+
+Basically, what a program has to do to use a Lemon-generated parser +is first create the parser, then send it lots of tokens obtained by +tokenizing an input source. When the end of input is reached, the +Parse() routine should be called one last time with a token type +of 0. This step is necessary to inform the parser that the end of +input has been reached. Finally, we reclaim memory used by the +parser by calling ParseFree().

+ +

There is one other interface routine that should be mentioned +before we move on. +The ParseTrace() function can be used to generate debugging output +from the parser. A prototype for this routine is as follows: +

+   ParseTrace(FILE *stream, char *zPrefix);
+
+After this routine is called, a short (one-line) message is written +to the designated output stream every time the parser changes states +or calls an action routine. Each such message is prefaced using +the text given by zPrefix. This debugging output can be turned off +by calling ParseTrace() again with a first argument of NULL (0).
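
+ +

+For example, a program might enable tracing to stderr while debugging and later turn it off again. The following is a minimal sketch using the default ``Parse'' prefix (the prefix string is arbitrary): +

+   ParseTrace(stderr, "parser> ");
+   /* ... calls to Parse() now emit one-line trace messages ... */
+   ParseTrace(0, 0);            /* turn tracing back off */
+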

+ +

Differences With YACC and BISON

+ +

Programmers who have previously used the yacc or bison parser +generator will notice several important differences between yacc and/or +bison and Lemon. +

    +
  • In yacc and bison, the parser calls the tokenizer. In Lemon, + the tokenizer calls the parser. +
  • Lemon uses no global variables. Yacc and bison use global variables + to pass information between the tokenizer and parser. +
  • Lemon allows multiple parsers to be running simultaneously. Yacc + and bison do not. +
+These differences may cause some initial confusion for programmers +with prior yacc and bison experience. +But after years of experience using Lemon, I firmly +believe that the Lemon way of doing things is better.

+ +

Input File Syntax

+ +

The main purpose of the grammar specification file for Lemon is +to define the grammar for the parser. But the input file also +specifies additional information Lemon requires to do its job. +Most of the work in using Lemon is in writing an appropriate +grammar file.

+ +

The grammar file for lemon is, for the most part, free format. +It does not have sections or divisions like yacc or bison. Any +declaration can occur at any point in the file. +Lemon ignores whitespace (except where it is needed to separate +tokens) and it honors the same commenting conventions as C and C++.

+ +

Terminals and Nonterminals

+ +

A terminal symbol (token) is any string of alphanumeric +and underscore characters +that begins with an upper case letter. +A terminal can contain lower case letters after the first character, +but the usual convention is to make terminals all upper case. +A nonterminal, on the other hand, is any string of alphanumeric +and underscore characters that begins with a lower case letter. +Again, the usual convention is to make nonterminals use all lower +case letters.

+ +

In Lemon, terminal and nonterminal symbols do not need to +be declared or identified in a separate section of the grammar file. +Lemon is able to generate a list of all terminals and nonterminals +by examining the grammar rules, and it can always distinguish a +terminal from a nonterminal by checking the case of the first +character of the name.

+ +

Yacc and bison allow terminal symbols to have either alphanumeric +names or to be individual characters included in single quotes, like +this: ')' or '$'. Lemon does not allow this alternative form for +terminal symbols. With Lemon, all symbols, terminals and nonterminals, +must have alphanumeric names.

+ +

Grammar Rules

+ +

The main component of a Lemon grammar file is a sequence of grammar +rules. +Each grammar rule consists of a nonterminal symbol followed by +the special symbol ``::='' and then a list of terminals and/or nonterminals. +The rule is terminated by a period. +The list of terminals and nonterminals on the right-hand side of the +rule can be empty. +Rules can occur in any order, except that the left-hand side of the +first rule is assumed to be the start symbol for the grammar (unless +specified otherwise using the %start_symbol directive described below.) +A typical sequence of grammar rules might look something like this: +

+  expr ::= expr PLUS expr.
+  expr ::= expr TIMES expr.
+  expr ::= LPAREN expr RPAREN.
+  expr ::= VALUE.
+
+

+ +

There is one non-terminal in this example, ``expr'', and five +terminal symbols or tokens: ``PLUS'', ``TIMES'', ``LPAREN'', +``RPAREN'' and ``VALUE''.

+ +

Like yacc and bison, Lemon allows the grammar to specify a block +of C code that will be executed whenever a grammar rule is reduced +by the parser. +In Lemon, this action is specified by putting the C code (contained +within curly braces {...}) immediately after the +period that closes the rule. +For example: +

+  expr ::= expr PLUS expr.   { printf("Doing an addition...\n"); }
+
+

+ +

In order to be useful, grammar actions must normally be linked to +their associated grammar rules. +In yacc and bison, this is accomplished by embedding a ``$$'' in the +action to stand for the value of the left-hand side of the rule and +symbols ``$1'', ``$2'', and so forth to stand for the value of +the terminal or nonterminal at position 1, 2 and so forth on the +right-hand side of the rule. +This idea is very powerful, but it is also very error-prone. The +single most common source of errors in a yacc or bison grammar is +to miscount the number of symbols on the right-hand side of a grammar +rule and say ``$7'' when you really mean ``$8''.

+ +

Lemon avoids the need to count grammar symbols by assigning symbolic +names to each symbol in a grammar rule and then using those symbolic +names in the action. +In yacc or bison, one would write this: +

+  expr -> expr PLUS expr  { $$ = $1 + $3; };
+
+But in Lemon, the same rule becomes the following: +
+  expr(A) ::= expr(B) PLUS expr(C).  { A = B+C; }
+
+In the Lemon rule, any symbol in parentheses after a grammar rule +symbol becomes a place holder for that symbol in the grammar rule. +This place holder can then be used in the associated C action to +stand for the value of that symbol.

+ +

The Lemon notation for linking a grammar rule with its reduce +action is superior to yacc/bison on several counts. +First, as mentioned above, the Lemon method avoids the need to +count grammar symbols. +Secondly, if a terminal or nonterminal in a Lemon grammar rule +includes a linking symbol in parentheses but that linking symbol +is not actually used in the reduce action, then an error message +is generated. +For example, the rule +

+  expr(A) ::= expr(B) PLUS expr(C).  { A = B; }
+
+will generate an error because the linking symbol ``C'' is used +in the grammar rule but not in the reduce action.

+ +

The Lemon notation for linking grammar rules to reduce actions +also facilitates the use of destructors for reclaiming memory +allocated by the values of terminals and nonterminals on the +right-hand side of a rule.

+ +

Precedence Rules

+ +

Lemon resolves parsing ambiguities in exactly the same way as +yacc and bison. A shift-reduce conflict is resolved in favor +of the shift, and a reduce-reduce conflict is resolved by reducing +whichever rule comes first in the grammar file.

+ +

Just like in +yacc and bison, Lemon allows a measure of control +over the resolution of parsing conflicts using precedence rules. +A precedence value can be assigned to any terminal symbol +using the %left, %right or %nonassoc directives. Terminal symbols +mentioned in earlier directives have a lower precedence than +terminal symbols mentioned in later directives. For example:

+ +

+   %left AND.
+   %left OR.
+   %nonassoc EQ NE GT GE LT LE.
+   %left PLUS MINUS.
+   %left TIMES DIVIDE MOD.
+   %right EXP NOT.
+

+ +

In the preceding sequence of directives, the AND operator is +defined to have the lowest precedence. The OR operator is one +precedence level higher. And so forth. Hence, the grammar would +attempt to group the ambiguous expression +

+     a AND b OR c
+
+like this +
+     a AND (b OR c).
+
+The associativity (left, right or nonassoc) is used to determine +the grouping when the precedence is the same. AND is left-associative +in our example, so +
+     a AND b AND c
+
+is parsed like this +
+     (a AND b) AND c.
+
+The EXP operator is right-associative, though, so +
+     a EXP b EXP c
+
+is parsed like this +
+     a EXP (b EXP c).
+
+The nonassoc precedence is used for non-associative operators. +So +
+     a EQ b EQ c
+
+is an error.

+ +

The precedence of non-terminals is transferred to rules as follows: +The precedence of a grammar rule is equal to the precedence of the +left-most terminal symbol in the rule for which a precedence is +defined. This is normally what you want, but in those cases where +you want the precedence of a grammar rule to be something different, +you can specify an alternative precedence symbol by putting the +symbol in square braces after the period at the end of the rule and +before any C-code. For example:

+ +

+   expr ::= MINUS expr.  [NOT]
+

+ +

This rule has a precedence equal to that of the NOT symbol, not the +MINUS symbol as would have been the case by default.

+ +

With the knowledge of how precedence is assigned to terminal +symbols and individual +grammar rules, we can now explain precisely how parsing conflicts +are resolved in Lemon. Shift-reduce conflicts are resolved +as follows: +

    +
  • If either the token to be shifted or the rule to be reduced + lacks precedence information, then resolve in favor of the + shift, but report a parsing conflict. +
  • If the precedence of the token to be shifted is greater than + the precedence of the rule to reduce, then resolve in favor + of the shift. No parsing conflict is reported. +
  • If the precedence of the token to be shifted is less than the + precedence of the rule to reduce, then resolve in favor of the + reduce action. No parsing conflict is reported. +
  • If the precedences are the same and the shift token is + right-associative, then resolve in favor of the shift. + No parsing conflict is reported. +
  • If the precedences are the same and the shift token is + left-associative, then resolve in favor of the reduce. + No parsing conflict is reported. +
  • Otherwise, resolve the conflict by doing the shift and + report the parsing conflict. +
+Reduce-reduce conflicts are resolved this way: +
    +
  • If either reduce rule + lacks precedence information, then resolve in favor of the + rule that appears first in the grammar and report a parsing + conflict. +
  • If both rules have precedence and the precedence is different + then resolve the dispute in favor of the rule with the highest + precedence and do not report a conflict. +
  • Otherwise, resolve the conflict by reducing by the rule that + appears first in the grammar and report a parsing conflict. +
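
+ +

+As a concrete sketch of these rules, consider the expression grammar from earlier with precedence declarations added (this fragment is illustrative only): +

+   %left PLUS MINUS.
+   %left TIMES DIVIDE.
+
+   expr(A) ::= expr(B) PLUS expr(C).    { A = B + C; }
+   expr(A) ::= expr(B) TIMES expr(C).   { A = B * C; }
+   expr(A) ::= VALUE(B).                { A = B; }
+
+Given the input ``a PLUS b TIMES c'', the TIMES token has a higher precedence than the rule for PLUS, so the shift wins and the expression groups as ``a PLUS (b TIMES c)''. Given ``a PLUS b PLUS c'', the precedences are equal and PLUS is left-associative, so the reduce wins and the expression groups as ``(a PLUS b) PLUS c''. Neither case is reported as a conflict.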
+ +

Special Directives

+ +

The input grammar to Lemon consists of grammar rules and special +directives. We've described all the grammar rules, so now we'll +talk about the special directives.

+ +

Directives in lemon can occur in any order. You can put them before +the grammar rules, or after the grammar rules, or in the midst of the +grammar rules. It doesn't matter. The relative order of +directives used to assign precedence to terminals is important, but +other than that, the order of directives in Lemon is arbitrary.

+ +

Lemon supports the following special directives: +

    +
  • %code +
  • %default_destructor +
  • %default_type +
  • %destructor +
  • %extra_argument +
  • %include +
  • %left +
  • %name +
  • %nonassoc +
  • %parse_accept +
  • %parse_failure +
  • %right +
  • %stack_overflow +
  • %stack_size +
  • %start_symbol +
  • %syntax_error +
  • %token_destructor +
  • %token_prefix +
  • %token_type +
  • %type +
+Each of these directives will be described separately in the +following sections:

+ +

The %code directive

+ +

The %code directive is used to specify additional C/C++ code that +is added to the end of the main output file. This is similar to +the %include directive except that %include is inserted at the +beginning of the main output file.

+ +

%code is typically used to include some action routines or perhaps +a tokenizer as part of the output file.
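
+ +

+A minimal sketch of the directive (the helper routine below is illustrative and not part of Lemon): +

+   %code {
+      /* This block is copied verbatim to the end of the generated parser.
+      ** It assumes <stdio.h> was pulled in via a %include directive. */
+      static void parse_log_done(void){
+        printf("parse finished\n");
+      }
+   }
+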

+ +

The %default_destructor directive

+ +

The %default_destructor directive specifies a destructor to +use for non-terminals that do not have their own destructor +specified by a separate %destructor directive. See the documentation +on the %destructor directive below for additional information.

+ +

In some grammars, many different non-terminal symbols have the +same datatype and hence the same destructor. This directive is +a convenient way to specify the same destructor for all those +non-terminals using a single statement.
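
+ +

+A minimal sketch, assuming every non-terminal without its own %destructor holds a value obtained from malloc(): +

+   %default_type       {void*}
+   %default_destructor { free($$); }
+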

+ +

The %default_type directive

+ +

The %default_type directive specifies the datatype of non-terminal +symbols that do not have their own datatype defined using a separate +%type directive. See the documentation on %type below for additional +information.
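
+ +

+For example, a grammar in which most non-terminals carry a pointer to a parse-tree node (the Expr type is illustrative) might say: +

+   %default_type {Expr*}
+
+Individual %type declarations still override the default for particular non-terminals.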

+ +

The %destructor directive

+ +

The %destructor directive is used to specify a destructor for +a non-terminal symbol. +(See also the %token_destructor directive which is used to +specify a destructor for terminal symbols.)

+ +

A non-terminal's destructor is called to dispose of the +non-terminal's value whenever the non-terminal is popped from +the stack. This includes all of the following circumstances: +

    +
  • When a rule reduces and the value of a non-terminal on + the right-hand side is not linked to C code. +
  • When the stack is popped during error processing. +
  • When the ParseFree() function runs. +
+The destructor can do whatever it wants with the value of +the non-terminal, but its design is to deallocate memory +or other resources held by that non-terminal.

+ +

Consider an example: +

+   %type nt {void*}
+   %destructor nt { free($$); }
+   nt(A) ::= ID NUM.   { A = malloc( 100 ); }
+
+This example is a bit contrived but it serves to illustrate how +destructors work. The example shows a non-terminal named +``nt'' that holds values of type ``void*''. When the rule for +an ``nt'' reduces, it sets the value of the non-terminal to +space obtained from malloc(). Later, when the nt non-terminal +is popped from the stack, the destructor will fire and call +free() on this malloced space, thus avoiding a memory leak. +(Note that the symbol ``$$'' in the destructor code is replaced +by the value of the non-terminal.)

+ +

It is important to note that the value of a non-terminal is passed +to the destructor whenever the non-terminal is removed from the +stack, unless the non-terminal is used in a C-code action. If +the non-terminal is used by C-code, then it is assumed that the +C-code will take care of destroying it if it should really +be destroyed. More commonly, the value is used to build some +larger structure and we don't want to destroy it, which is why +the destructor is not called in this circumstance.

+ +

By appropriate use of destructors, it is possible to +build a parser using Lemon that can be used within a long-running +program, such as a GUI, that will not leak memory or other resources. +To do the same using yacc or bison is much more difficult.

+ +

The %extra_argument directive

+ +The %extra_argument directive instructs Lemon to add a 4th parameter +to the parameter list of the Parse() function it generates. Lemon +doesn't do anything itself with this extra argument, but it does +make the argument available to C-code action routines, destructors, +and so forth. For example, if the grammar file contains:

+ +

+    %extra_argument { MyStruct *pAbc }
+

+ +

Then the Parse() function generated will have a 4th parameter +of type ``MyStruct*'' and all action routines will have access to +a variable named ``pAbc'' that is the value of the 4th parameter +in the most recent call to Parse().
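
+ +

+For instance, a reduce action could record its result through the extra argument. A sketch (the ``result'' field of MyStruct is hypothetical): +

+   prog ::= expr(A).   { pAbc->result = A; }
+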

+ +

The %include directive

+ +

The %include directive specifies C code that is included at the +top of the generated parser. You can include any text you want -- +the Lemon parser generator copies it blindly. If you have multiple +%include directives in your grammar file, the value of the last +%include directive overwrites all the others. The %include directive is very handy for getting some extra #include +preprocessor statements at the beginning of the generated parser. +For example:

+ +

+   %include {#include <unistd.h>}
+

+ +

This might be needed, for example, if some of the C actions in the +grammar call functions that are prototyped in unistd.h.

+ +

The %left directive

+ +The %left directive is used (along with the %right and +%nonassoc directives) to declare precedences of terminal +symbols. Every terminal symbol whose name appears after +a %left directive but before the next period (``.'') is +given the same left-associative precedence value. Subsequent +%left directives have higher precedence. For example:

+ +

+   %left AND.
+   %left OR.
+   %nonassoc EQ NE GT GE LT LE.
+   %left PLUS MINUS.
+   %left TIMES DIVIDE MOD.
+   %right EXP NOT.
+

+ +

Note the period that terminates each %left, %right or %nonassoc +directive.

+ +

LALR(1) grammars can get into a situation where they require +a large amount of stack space if you make heavy use of right-associative +operators. For this reason, it is recommended that you use %left +rather than %right whenever possible.

+ +

The %name directive

+ +

By default, the functions generated by Lemon all begin with the +five-character string ``Parse''. You can change this string to something +different using the %name directive. For instance:

+ +

+   %name Abcde
+

+ +

Putting this directive in the grammar file will cause Lemon to generate +functions named +

    +
  • AbcdeAlloc(), +
  • AbcdeFree(), +
  • AbcdeTrace(), and +
  • Abcde(). +
+The %name directive allows you to generate two or more different +parsers and link them all into the same executable. +
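
+ +

+For instance, with ``%name Abcde'' the calling code from the earlier example would be written as follows (a sketch): +

+   void *pParser = AbcdeAlloc( malloc );
+   while( GetNextToken(pTokenizer, &hTokenId, &sToken) ){
+      Abcde(pParser, hTokenId, sToken, &sState);
+   }
+   Abcde(pParser, 0, sToken, &sState);
+   AbcdeFree(pParser, free);
+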

+ +

The %nonassoc directive

+ +

This directive is used to assign non-associative precedence to +one or more terminal symbols. See the section on precedence rules +or on the %left directive for additional information.

+ +

The %parse_accept directive

+ +

The %parse_accept directive specifies a block of C code that is +executed whenever the parser accepts its input string. To ``accept'' +an input string means that the parser was able to process all tokens +without error.

+ +

For example:

+ +

+   %parse_accept {
+      printf("parsing complete!\n");
+   }
+

+ + +

The %parse_failure directive

+ +

The %parse_failure directive specifies a block of C code that +is executed whenever the parser fails to complete. This code is not +executed until the parser has tried and failed to resolve an input +error using its usual error recovery strategy. The routine is +only invoked when parsing is unable to continue.

+ +

+   %parse_failure {
+     fprintf(stderr,"Giving up.  Parser is hopelessly lost...\n");
+   }
+

+ +

The %right directive

+ +

This directive is used to assign right-associative precedence to +one or more terminal symbols. See the section on precedence rules +or on the %left directive for additional information.

+ +

The %stack_overflow directive

+ +

The %stack_overflow directive specifies a block of C code that +is executed if the parser's internal stack ever overflows. Typically +this just prints an error message. After a stack overflow, the parser +will be unable to continue and must be reset.

+ +

+   %stack_overflow {
+     fprintf(stderr,"Giving up.  Parser stack overflow\n");
+   }
+

+ +

You can help prevent parser stack overflows by avoiding the use +of right recursion and right-precedence operators in your grammar. +Use left recursion and left-precedence operators instead, to +encourage rules to reduce sooner and keep the stack size down. +For example, use rules like this: +

+   list ::= list element.      // left-recursion.  Good!
+   list ::= .
+
+Not like this: +
+   list ::= element list.      // right-recursion.  Bad!
+   list ::= .
+
+ +

The %stack_size directive

+ +

If stack overflow is a problem and you can't resolve the trouble +by using left-recursion, then you might want to increase the size +of the parser's stack using this directive. Put a positive integer +after the %stack_size directive and Lemon will generate a parser +with a stack of the requested size. The default value is 100.

+ +

+   %stack_size 2000
+

+ +

The %start_symbol directive

+ +

By default, the start-symbol for the grammar that Lemon generates +is the first non-terminal that appears in the grammar file. But you +can choose a different start-symbol using the %start_symbol directive.

+ +

+   %start_symbol  prog
+

+ +

The %token_destructor directive

+ +

The %destructor directive assigns a destructor to a non-terminal +symbol (see the description of the %destructor directive above). +The %token_destructor directive does the same thing for all terminal symbols.

+ +

Unlike non-terminal symbols which may each have a different data type +for their values, terminals all use the same data type (defined by +the %token_type directive) and so they use a common destructor. Other +than that, the token destructor works just like the non-terminal +destructors.
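
+ +

+A minimal sketch, assuming terminal values are heap-allocated Token structures released by a hypothetical TokenFree() routine: +

+   %token_type       {Token*}
+   %token_destructor { TokenFree($$); }
+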

+ +

The %token_prefix directive

+ +

Lemon generates #defines that assign small integer constants +to each terminal symbol in the grammar. If desired, Lemon will +add a prefix specified by this directive +to each of the #defines it generates. +So if the default output of Lemon looked like this: +

+    #define AND              1
+    #define MINUS            2
+    #define OR               3
+    #define PLUS             4
+
+You can insert a statement into the grammar like this: +
+    %token_prefix    TOKEN_
+
+to cause Lemon to produce these symbols instead: +
+    #define TOKEN_AND        1
+    #define TOKEN_MINUS      2
+    #define TOKEN_OR         3
+    #define TOKEN_PLUS       4
+
+ +

The %token_type and %type directives

+ +

These directives are used to specify the data types for values +on the parser's stack associated with terminal and non-terminal +symbols. The values of all terminal symbols must be of the same +type. This turns out to be the same data type as the 3rd parameter +to the Parse() function generated by Lemon. Typically, you will +make the value of a terminal symbol be a pointer to some kind of +token structure, like this:

+ +

+   %token_type    {Token*}
+

+ +

If the data type of terminals is not specified, the default value +is ``int''.

+ +

Non-terminal symbols can each have their own data types. Typically +the data type of a non-terminal is a pointer to the root of a parse-tree +structure that contains all information about that non-terminal. +For example:

+ +

+   %type   expr  {Expr*}
+

+ +

Each entry on the parser's stack is actually a union containing +instances of all data types for every non-terminal and terminal symbol. +Lemon will automatically use the correct element of this union depending +on what the corresponding non-terminal or terminal symbol is. But +the grammar designer should keep in mind that the size of the union +will be the size of its largest element. So if you have a single +non-terminal whose data type requires 1K of storage, then your 100 +entry parser stack will require 100K of heap space. If you are willing +and able to pay that price, fine. You just need to know.

+ +

Error Processing

+ +

After extensive experimentation over several years, it has been +discovered that the error recovery strategy used by yacc is about +as good as it gets. And so that is what Lemon uses.

+ +

When a Lemon-generated parser encounters a syntax error, it +first invokes the code specified by the %syntax_error directive, if +any. It then enters its error recovery strategy. The error recovery +strategy is to begin popping the parser's stack until it enters a +state where it is permitted to shift a special non-terminal symbol +named ``error''. It then shifts this non-terminal and continues +parsing. But the %syntax_error routine will not be called again +until at least three new tokens have been successfully shifted.

+ +

If the parser pops its stack until the stack is empty, and it still +is unable to shift the error symbol, then the %parse_failure routine +is invoked and the parser resets itself to its start state, ready +to begin parsing a new file. This is what will happen at the very +first syntax error, of course, if there are no instances of the +``error'' non-terminal in your grammar.
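
+ +

+The following sketch shows how a grammar might combine the %syntax_error directive with the special ``error'' non-terminal to resynchronize at statement boundaries (the stmt and SEMI symbols are illustrative): +

+   %syntax_error {
+     fprintf(stderr, "syntax error\n");
+   }
+
+   stmt ::= error SEMI.
+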

+ + + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/doc/report1.txt b/libraries/sqlite/unix/sqlite-3.5.1/doc/report1.txt new file mode 100644 index 0000000000..a236e7cedf --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/doc/report1.txt @@ -0,0 +1,121 @@ +An SQLite (version 1.0) database was used in a large military application +where the database contained 105 tables and indices. The following is +a breakdown on the sizes of keys and data within these tables and indices: + +Entries: 967089 +Size: 45896104 +Avg Size: 48 +Key Size: 11112265 +Avg Key Size: 12 +Max Key Size: 99 + + 0..8 263 0% + 9..12 5560 0% + 13..16 71394 7% + 17..24 180717 26% + 25..32 215442 48% + 33..40 151118 64% + 41..48 77479 72% + 49..56 13983 74% + 57..64 14481 75% + 65..80 41342 79% + 81..96 127098 92% + 97..112 38054 96% + 113..128 14197 98% + 129..144 8208 99% + 145..160 3326 99% + 161..176 1242 99% + 177..192 604 99% + 193..208 222 99% + 209..224 213 99% + 225..240 132 99% + 241..256 58 99% + 257..288 515 99% + 289..320 64 99% + 321..352 39 99% + 353..384 44 99% + 385..416 25 99% + 417..448 24 99% + 449..480 26 99% + 481..512 27 99% + 513..1024 470 99% + 1025..2048 396 99% + 2049..4096 187 99% + 4097..8192 78 99% + 8193..16384 35 99% +16385..32768 17 99% +32769..65536 6 99% +65537..65541 3 100% + +If the indices are omitted, the statistics for the 49 tables +become the following: + +Entries: 451103 +Size: 30930282 +Avg Size: 69 +Key Size: 1804412 +Avg Key Size: 4 +Max Key Size: 4 + + 0..24 89 0% + 25..32 9417 2% + 33..40 119162 28% + 41..48 68710 43% + 49..56 9539 45% + 57..64 12435 48% + 65..80 38650 57% + 81..96 126877 85% + 97..112 38030 93% + 113..128 14183 96% + 129..144 7668 98% + 145..160 3302 99% + 161..176 1238 99% + 177..192 597 99% + 193..208 217 99% + 209..224 211 99% + 225..240 130 99% + 241..256 57 99% + 257..288 100 99% + 289..320 62 99% + 321..352 34 99% + 353..384 43 99% + 385..416 24 99% + 417..448 24 99% + 449..480 25 99% + 481..512 27 99% + 513..1024 153 99% + 1025..2048 92 99% + 2049..4096 7 100% + +The 56 indices have these statistics: + +Entries: 512422 +Size: 14879828 +Avg Size: 30 +Key Size: 9253204 +Avg Key Size: 19 +Max Key Size: 99 + + 0..8 246 0% + 9..12 5486 1% + 13..16 70717 14% + 17..24 178246 49% + 25..32 205722 89% + 33..40 31951 96% + 41..48 8768 97% + 49..56 4444 98% + 57..64 2046 99% + 65..80 2691 99% + 81..96 202 99% + 97..112 11 99% + 113..144 527 99% + 145..160 20 99% + 161..288 406 99% + 289..1024 316 99% + 1025..2048 304 99% + 2049..4096 180 99% + 4097..8192 78 99% + 8193..16384 35 99% +16385..32768 17 99% +32769..65536 6 99% +65537..65541 3 100% diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/README.txt b/libraries/sqlite/unix/sqlite-3.5.1/ext/README.txt new file mode 100644 index 0000000000..009495f59d --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/README.txt @@ -0,0 +1,2 @@ +Version loadable extensions to SQLite are found in subfolders +of this folder. diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/README.txt b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/README.txt new file mode 100644 index 0000000000..292b7daa0b --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/README.txt @@ -0,0 +1,2 @@ +This folder contains source code to the first full-text search +extension for SQLite. 
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/fts1.c b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/fts1.c new file mode 100644 index 0000000000..5a69965d47 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/fts1.c @@ -0,0 +1,3344 @@ +/* fts1 has a design flaw which can lead to database corruption (see +** below). It is recommended not to use it any longer, instead use +** fts3 (or higher). If you believe that your use of fts1 is safe, +** add -DSQLITE_ENABLE_BROKEN_FTS1=1 to your CFLAGS. +*/ +#ifndef SQLITE_ENABLE_BROKEN_FTS1 +#error fts1 has a design flaw and has been deprecated. +#endif +/* The flaw is that fts1 uses the content table's unaliased rowid as +** the unique docid. fts1 embeds the rowid in the index it builds, +** and expects the rowid to not change. The SQLite VACUUM operation +** will renumber such rowids, thereby breaking fts1. If you are using +** fts1 in a system which has disabled VACUUM, then you can continue +** to use it safely. Note that PRAGMA auto_vacuum does NOT disable +** VACUUM, though systems using auto_vacuum are unlikely to invoke +** VACUUM. +** +** fts1 should be safe even across VACUUM if you only insert documents +** and never delete. +*/ + +/* The author disclaims copyright to this source code. + * + * This is an SQLite module implementing full-text search. + */ + +/* +** The code in this file is only compiled if: +** +** * The FTS1 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS1 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS1 is defined). +*/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1) + +#if defined(SQLITE_ENABLE_FTS1) && !defined(SQLITE_CORE) +# define SQLITE_CORE 1 +#endif + +#include +#include +#include +#include +#include + +#include "fts1.h" +#include "fts1_hash.h" +#include "fts1_tokenizer.h" +#include "sqlite3.h" +#include "sqlite3ext.h" +SQLITE_EXTENSION_INIT1 + + +#if 0 +# define TRACE(A) printf A; fflush(stdout) +#else +# define TRACE(A) +#endif + +/* utility functions */ + +typedef struct StringBuffer { + int len; /* length, not including null terminator */ + int alloced; /* Space allocated for s[] */ + char *s; /* Content of the string */ +} StringBuffer; + +static void initStringBuffer(StringBuffer *sb){ + sb->len = 0; + sb->alloced = 100; + sb->s = malloc(100); + sb->s[0] = '\0'; +} + +static void nappend(StringBuffer *sb, const char *zFrom, int nFrom){ + if( sb->len + nFrom >= sb->alloced ){ + sb->alloced = sb->len + nFrom + 100; + sb->s = realloc(sb->s, sb->alloced+1); + if( sb->s==0 ){ + initStringBuffer(sb); + return; + } + } + memcpy(sb->s + sb->len, zFrom, nFrom); + sb->len += nFrom; + sb->s[sb->len] = 0; +} +static void append(StringBuffer *sb, const char *zFrom){ + nappend(sb, zFrom, strlen(zFrom)); +} + +/* We encode variable-length integers in little-endian order using seven bits + * per byte as follows: +** +** KEY: +** A = 0xxxxxxx 7 bits of data and one flag bit +** B = 1xxxxxxx 7 bits of data and one flag bit +** +** 7 bits - A +** 14 bits - BA +** 21 bits - BBA +** and so on. +*/ + +/* We may need up to VARINT_MAX bytes to store an encoded 64-bit integer. */ +#define VARINT_MAX 10 + +/* Write a 64-bit variable-length integer to memory starting at p[0]. + * The length of data written will be between 1 and VARINT_MAX bytes. + * The number of bytes written is returned. 
*/ +static int putVarint(char *p, sqlite_int64 v){ + unsigned char *q = (unsigned char *) p; + sqlite_uint64 vu = v; + do{ + *q++ = (unsigned char) ((vu & 0x7f) | 0x80); + vu >>= 7; + }while( vu!=0 ); + q[-1] &= 0x7f; /* turn off high bit in final byte */ + assert( q - (unsigned char *)p <= VARINT_MAX ); + return (int) (q - (unsigned char *)p); +} + +/* Read a 64-bit variable-length integer from memory starting at p[0]. + * Return the number of bytes read, or 0 on error. + * The value is stored in *v. */ +static int getVarint(const char *p, sqlite_int64 *v){ + const unsigned char *q = (const unsigned char *) p; + sqlite_uint64 x = 0, y = 1; + while( (*q & 0x80) == 0x80 ){ + x += y * (*q++ & 0x7f); + y <<= 7; + if( q - (unsigned char *)p >= VARINT_MAX ){ /* bad data */ + assert( 0 ); + return 0; + } + } + x += y * (*q++); + *v = (sqlite_int64) x; + return (int) (q - (unsigned char *)p); +} + +static int getVarint32(const char *p, int *pi){ + sqlite_int64 i; + int ret = getVarint(p, &i); + *pi = (int) i; + assert( *pi==i ); + return ret; +} + +/*** Document lists *** + * + * A document list holds a sorted list of varint-encoded document IDs. + * + * A doclist with type DL_POSITIONS_OFFSETS is stored like this: + * + * array { + * varint docid; + * array { + * varint position; (delta from previous position plus POS_BASE) + * varint startOffset; (delta from previous startOffset) + * varint endOffset; (delta from startOffset) + * } + * } + * + * Here, array { X } means zero or more occurrences of X, adjacent in memory. + * + * A position list may hold positions for text in multiple columns. A position + * POS_COLUMN is followed by a varint containing the index of the column for + * following positions in the list. Any positions appearing before any + * occurrences of POS_COLUMN are for column 0. + * + * A doclist with type DL_POSITIONS is like the above, but holds only docids + * and positions without offset information. + * + * A doclist with type DL_DOCIDS is like the above, but holds only docids + * without positions or offset information. + * + * On disk, every document list has positions and offsets, so we don't bother + * to serialize a doclist's type. + * + * We don't yet delta-encode document IDs; doing so will probably be a + * modest win. + * + * NOTE(shess) I've thought of a slightly (1%) better offset encoding. + * After the first offset, estimate the next offset by using the + * current token position and the previous token position and offset, + * offset to handle some variance. So the estimate would be + * (iPosition*w->iStartOffset/w->iPosition-64), which is delta-encoded + * as normal. Offsets more than 64 chars from the estimate are + * encoded as the delta to the previous start offset + 128. An + * additional tiny increment can be gained by using the end offset of + * the previous token to make the estimate a tiny bit more precise. +*/ + +/* It is not safe to call isspace(), tolower(), or isalnum() on +** hi-bit-set characters. This is the same solution used in the +** tokenizer. +*/ +/* TODO(shess) The snippet-generation code should be using the +** tokenizer-generated tokens rather than doing its own local +** tokenization. +*/ +/* TODO(shess) Is __isascii() a portable version of (c&0x80)==0? */ +static int safe_isspace(char c){ + return (c&0x80)==0 ? isspace(c) : 0; +} +static int safe_tolower(char c){ + return (c&0x80)==0 ? tolower(c) : c; +} +static int safe_isalnum(char c){ + return (c&0x80)==0 ? 
isalnum(c) : 0; +} + +typedef enum DocListType { + DL_DOCIDS, /* docids only */ + DL_POSITIONS, /* docids + positions */ + DL_POSITIONS_OFFSETS /* docids + positions + offsets */ +} DocListType; + +/* +** By default, only positions and not offsets are stored in the doclists. +** To change this so that offsets are stored too, compile with +** +** -DDL_DEFAULT=DL_POSITIONS_OFFSETS +** +*/ +#ifndef DL_DEFAULT +# define DL_DEFAULT DL_POSITIONS +#endif + +typedef struct DocList { + char *pData; + int nData; + DocListType iType; + int iLastColumn; /* the last column written */ + int iLastPos; /* the last position written */ + int iLastOffset; /* the last start offset written */ +} DocList; + +enum { + POS_END = 0, /* end of this position list */ + POS_COLUMN, /* followed by new column number */ + POS_BASE +}; + +/* Initialize a new DocList to hold the given data. */ +static void docListInit(DocList *d, DocListType iType, + const char *pData, int nData){ + d->nData = nData; + if( nData>0 ){ + d->pData = malloc(nData); + memcpy(d->pData, pData, nData); + } else { + d->pData = NULL; + } + d->iType = iType; + d->iLastColumn = 0; + d->iLastPos = d->iLastOffset = 0; +} + +/* Create a new dynamically-allocated DocList. */ +static DocList *docListNew(DocListType iType){ + DocList *d = (DocList *) malloc(sizeof(DocList)); + docListInit(d, iType, 0, 0); + return d; +} + +static void docListDestroy(DocList *d){ + free(d->pData); +#ifndef NDEBUG + memset(d, 0x55, sizeof(*d)); +#endif +} + +static void docListDelete(DocList *d){ + docListDestroy(d); + free(d); +} + +static char *docListEnd(DocList *d){ + return d->pData + d->nData; +} + +/* Append a varint to a DocList's data. */ +static void appendVarint(DocList *d, sqlite_int64 i){ + char c[VARINT_MAX]; + int n = putVarint(c, i); + d->pData = realloc(d->pData, d->nData + n); + memcpy(d->pData + d->nData, c, n); + d->nData += n; +} + +static void docListAddDocid(DocList *d, sqlite_int64 iDocid){ + appendVarint(d, iDocid); + if( d->iType>=DL_POSITIONS ){ + appendVarint(d, POS_END); /* initially empty position list */ + d->iLastColumn = 0; + d->iLastPos = d->iLastOffset = 0; + } +} + +/* helper function for docListAddPos and docListAddPosOffset */ +static void addPos(DocList *d, int iColumn, int iPos){ + assert( d->nData>0 ); + --d->nData; /* remove previous terminator */ + if( iColumn!=d->iLastColumn ){ + assert( iColumn>d->iLastColumn ); + appendVarint(d, POS_COLUMN); + appendVarint(d, iColumn); + d->iLastColumn = iColumn; + d->iLastPos = d->iLastOffset = 0; + } + assert( iPos>=d->iLastPos ); + appendVarint(d, iPos-d->iLastPos+POS_BASE); + d->iLastPos = iPos; +} + +/* Add a position to the last position list in a doclist. */ +static void docListAddPos(DocList *d, int iColumn, int iPos){ + assert( d->iType==DL_POSITIONS ); + addPos(d, iColumn, iPos); + appendVarint(d, POS_END); /* add new terminator */ +} + +/* +** Add a position and starting and ending offsets to a doclist. +** +** If the doclist is setup to handle only positions, then insert +** the position only and ignore the offsets. 
+*/ +static void docListAddPosOffset( + DocList *d, /* Doclist under construction */ + int iColumn, /* Column the inserted term is part of */ + int iPos, /* Position of the inserted term */ + int iStartOffset, /* Starting offset of inserted term */ + int iEndOffset /* Ending offset of inserted term */ +){ + assert( d->iType>=DL_POSITIONS ); + addPos(d, iColumn, iPos); + if( d->iType==DL_POSITIONS_OFFSETS ){ + assert( iStartOffset>=d->iLastOffset ); + appendVarint(d, iStartOffset-d->iLastOffset); + d->iLastOffset = iStartOffset; + assert( iEndOffset>=iStartOffset ); + appendVarint(d, iEndOffset-iStartOffset); + } + appendVarint(d, POS_END); /* add new terminator */ +} + +/* +** A DocListReader object is a cursor into a doclist. Initialize +** the cursor to the beginning of the doclist by calling readerInit(). +** Then use routines +** +** peekDocid() +** readDocid() +** readPosition() +** skipPositionList() +** and so forth... +** +** to read information out of the doclist. When we reach the end +** of the doclist, atEnd() returns TRUE. +*/ +typedef struct DocListReader { + DocList *pDoclist; /* The document list we are stepping through */ + char *p; /* Pointer to next unread byte in the doclist */ + int iLastColumn; + int iLastPos; /* the last position read, or -1 when not in a position list */ +} DocListReader; + +/* +** Initialize the DocListReader r to point to the beginning of pDoclist. +*/ +static void readerInit(DocListReader *r, DocList *pDoclist){ + r->pDoclist = pDoclist; + if( pDoclist!=NULL ){ + r->p = pDoclist->pData; + } + r->iLastColumn = -1; + r->iLastPos = -1; +} + +/* +** Return TRUE if we have reached then end of pReader and there is +** nothing else left to read. +*/ +static int atEnd(DocListReader *pReader){ + return pReader->pDoclist==0 || (pReader->p >= docListEnd(pReader->pDoclist)); +} + +/* Peek at the next docid without advancing the read pointer. +*/ +static sqlite_int64 peekDocid(DocListReader *pReader){ + sqlite_int64 ret; + assert( !atEnd(pReader) ); + assert( pReader->iLastPos==-1 ); + getVarint(pReader->p, &ret); + return ret; +} + +/* Read the next docid. See also nextDocid(). +*/ +static sqlite_int64 readDocid(DocListReader *pReader){ + sqlite_int64 ret; + assert( !atEnd(pReader) ); + assert( pReader->iLastPos==-1 ); + pReader->p += getVarint(pReader->p, &ret); + if( pReader->pDoclist->iType>=DL_POSITIONS ){ + pReader->iLastColumn = 0; + pReader->iLastPos = 0; + } + return ret; +} + +/* Read the next position and column index from a position list. + * Returns the position, or -1 at the end of the list. */ +static int readPosition(DocListReader *pReader, int *iColumn){ + int i; + int iType = pReader->pDoclist->iType; + + if( pReader->iLastPos==-1 ){ + return -1; + } + assert( !atEnd(pReader) ); + + if( iTypep += getVarint32(pReader->p, &i); + if( i==POS_END ){ + pReader->iLastColumn = pReader->iLastPos = -1; + *iColumn = -1; + return -1; + } + if( i==POS_COLUMN ){ + pReader->p += getVarint32(pReader->p, &pReader->iLastColumn); + pReader->iLastPos = 0; + pReader->p += getVarint32(pReader->p, &i); + assert( i>=POS_BASE ); + } + pReader->iLastPos += ((int) i)-POS_BASE; + if( iType>=DL_POSITIONS_OFFSETS ){ + /* Skip over offsets, ignoring them for now. */ + int iStart, iEnd; + pReader->p += getVarint32(pReader->p, &iStart); + pReader->p += getVarint32(pReader->p, &iEnd); + } + *iColumn = pReader->iLastColumn; + return pReader->iLastPos; +} + +/* Skip past the end of a position list. 
*/ +static void skipPositionList(DocListReader *pReader){ + DocList *p = pReader->pDoclist; + if( p && p->iType>=DL_POSITIONS ){ + int iColumn; + while( readPosition(pReader, &iColumn)!=-1 ){} + } +} + +/* Skip over a docid, including its position list if the doclist has + * positions. */ +static void skipDocument(DocListReader *pReader){ + readDocid(pReader); + skipPositionList(pReader); +} + +/* Skip past all docids which are less than [iDocid]. Returns 1 if a docid + * matching [iDocid] was found. */ +static int skipToDocid(DocListReader *pReader, sqlite_int64 iDocid){ + sqlite_int64 d = 0; + while( !atEnd(pReader) && (d=peekDocid(pReader))iType>=DL_POSITIONS ){ + int iPos, iCol; + const char *zDiv = ""; + printf("("); + while( (iPos = readPosition(&r, &iCol))>=0 ){ + printf("%s%d:%d", zDiv, iCol, iPos); + zDiv = ":"; + } + printf(")"); + } + } + printf("\n"); + fflush(stdout); +} +#endif /* SQLITE_DEBUG */ + +/* Trim the given doclist to contain only positions in column + * [iRestrictColumn]. */ +static void docListRestrictColumn(DocList *in, int iRestrictColumn){ + DocListReader r; + DocList out; + + assert( in->iType>=DL_POSITIONS ); + readerInit(&r, in); + docListInit(&out, DL_POSITIONS, NULL, 0); + + while( !atEnd(&r) ){ + sqlite_int64 iDocid = readDocid(&r); + int iPos, iColumn; + + docListAddDocid(&out, iDocid); + while( (iPos = readPosition(&r, &iColumn)) != -1 ){ + if( iColumn==iRestrictColumn ){ + docListAddPos(&out, iColumn, iPos); + } + } + } + + docListDestroy(in); + *in = out; +} + +/* Trim the given doclist by discarding any docids without any remaining + * positions. */ +static void docListDiscardEmpty(DocList *in) { + DocListReader r; + DocList out; + + /* TODO: It would be nice to implement this operation in place; that + * could save a significant amount of memory in queries with long doclists. */ + assert( in->iType>=DL_POSITIONS ); + readerInit(&r, in); + docListInit(&out, DL_POSITIONS, NULL, 0); + + while( !atEnd(&r) ){ + sqlite_int64 iDocid = readDocid(&r); + int match = 0; + int iPos, iColumn; + while( (iPos = readPosition(&r, &iColumn)) != -1 ){ + if( !match ){ + docListAddDocid(&out, iDocid); + match = 1; + } + docListAddPos(&out, iColumn, iPos); + } + } + + docListDestroy(in); + *in = out; +} + +/* Helper function for docListUpdate() and docListAccumulate(). +** Splices a doclist element into the doclist represented by r, +** leaving r pointing after the newly spliced element. +*/ +static void docListSpliceElement(DocListReader *r, sqlite_int64 iDocid, + const char *pSource, int nSource){ + DocList *d = r->pDoclist; + char *pTarget; + int nTarget, found; + + found = skipToDocid(r, iDocid); + + /* Describe slice in d to place pSource/nSource. */ + pTarget = r->p; + if( found ){ + skipDocument(r); + nTarget = r->p-pTarget; + }else{ + nTarget = 0; + } + + /* The sense of the following is that there are three possibilities. + ** If nTarget==nSource, we should not move any memory nor realloc. + ** If nTarget>nSource, trim target and realloc. + ** If nTargetnSource ){ + memmove(pTarget+nSource, pTarget+nTarget, docListEnd(d)-(pTarget+nTarget)); + } + if( nTarget!=nSource ){ + int iDoclist = pTarget-d->pData; + d->pData = realloc(d->pData, d->nData+nSource-nTarget); + pTarget = d->pData+iDoclist; + } + if( nTargetnData += nSource-nTarget; + r->p = pTarget+nSource; +} + +/* Insert/update pUpdate into the doclist. 
*/ +static void docListUpdate(DocList *d, DocList *pUpdate){ + DocListReader reader; + + assert( d!=NULL && pUpdate!=NULL ); + assert( d->iType==pUpdate->iType); + + readerInit(&reader, d); + docListSpliceElement(&reader, firstDocid(pUpdate), + pUpdate->pData, pUpdate->nData); +} + +/* Propagate elements from pUpdate to pAcc, overwriting elements with +** matching docids. +*/ +static void docListAccumulate(DocList *pAcc, DocList *pUpdate){ + DocListReader accReader, updateReader; + + /* Handle edge cases where one doclist is empty. */ + assert( pAcc!=NULL ); + if( pUpdate==NULL || pUpdate->nData==0 ) return; + if( pAcc->nData==0 ){ + pAcc->pData = malloc(pUpdate->nData); + memcpy(pAcc->pData, pUpdate->pData, pUpdate->nData); + pAcc->nData = pUpdate->nData; + return; + } + + readerInit(&accReader, pAcc); + readerInit(&updateReader, pUpdate); + + while( !atEnd(&updateReader) ){ + char *pSource = updateReader.p; + sqlite_int64 iDocid = readDocid(&updateReader); + skipPositionList(&updateReader); + docListSpliceElement(&accReader, iDocid, pSource, updateReader.p-pSource); + } +} + +/* +** Read the next docid off of pIn. Return 0 if we reach the end. +* +* TODO: This assumes that docids are never 0, but they may actually be 0 since +* users can choose docids when inserting into a full-text table. Fix this. +*/ +static sqlite_int64 nextDocid(DocListReader *pIn){ + skipPositionList(pIn); + return atEnd(pIn) ? 0 : readDocid(pIn); +} + +/* +** pLeft and pRight are two DocListReaders that are pointing to +** positions lists of the same document: iDocid. +** +** If there are no instances in pLeft or pRight where the position +** of pLeft is one less than the position of pRight, then this +** routine adds nothing to pOut. +** +** If there are one or more instances where positions from pLeft +** are exactly one less than positions from pRight, then add a new +** document record to pOut. If pOut wants to hold positions, then +** include the positions from pRight that are one more than a +** position in pLeft. In other words: pRight.iPos==pLeft.iPos+1. +** +** pLeft and pRight are left pointing at the next document record. +*/ +static void mergePosList( + DocListReader *pLeft, /* Left position list */ + DocListReader *pRight, /* Right position list */ + sqlite_int64 iDocid, /* The docid from pLeft and pRight */ + DocList *pOut /* Write the merged document record here */ +){ + int iLeftCol, iLeftPos = readPosition(pLeft, &iLeftCol); + int iRightCol, iRightPos = readPosition(pRight, &iRightCol); + int match = 0; + + /* Loop until we've reached the end of both position lists. */ + while( iLeftPos!=-1 && iRightPos!=-1 ){ + if( iLeftCol==iRightCol && iLeftPos+1==iRightPos ){ + if( !match ){ + docListAddDocid(pOut, iDocid); + match = 1; + } + if( pOut->iType>=DL_POSITIONS ){ + docListAddPos(pOut, iRightCol, iRightPos); + } + iLeftPos = readPosition(pLeft, &iLeftCol); + iRightPos = readPosition(pRight, &iRightCol); + }else if( iRightCol=0 ) skipPositionList(pLeft); + if( iRightPos>=0 ) skipPositionList(pRight); +} + +/* We have two doclists: pLeft and pRight. +** Write the phrase intersection of these two doclists into pOut. +** +** A phrase intersection means that two documents only match +** if pLeft.iPos+1==pRight.iPos. +** +** The output pOut may or may not contain positions. If pOut +** does contain positions, they are the positions of pRight. 
+*/ +static void docListPhraseMerge( + DocList *pLeft, /* Doclist resulting from the words on the left */ + DocList *pRight, /* Doclist for the next word to the right */ + DocList *pOut /* Write the combined doclist here */ +){ + DocListReader left, right; + sqlite_int64 docidLeft, docidRight; + + readerInit(&left, pLeft); + readerInit(&right, pRight); + docidLeft = nextDocid(&left); + docidRight = nextDocid(&right); + + while( docidLeft>0 && docidRight>0 ){ + if( docidLeftiType0 && docidRight>0 ){ + if( docidLeft0 && docidRight>0 ){ + if( docidLeft<=docidRight ){ + docListAddDocid(pOut, docidLeft); + }else{ + docListAddDocid(pOut, docidRight); + } + priorLeft = docidLeft; + if( docidLeft<=docidRight ){ + docidLeft = nextDocid(&left); + } + if( docidRight>0 && docidRight<=priorLeft ){ + docidRight = nextDocid(&right); + } + } + while( docidLeft>0 ){ + docListAddDocid(pOut, docidLeft); + docidLeft = nextDocid(&left); + } + while( docidRight>0 ){ + docListAddDocid(pOut, docidRight); + docidRight = nextDocid(&right); + } +} + +/* We have two doclists: pLeft and pRight. +** Write into pOut all documents that occur in pLeft but not +** in pRight. +** +** Only docids are matched. Position information is ignored. +** +** The output pOut never holds positions. +*/ +static void docListExceptMerge( + DocList *pLeft, /* Doclist resulting from the words on the left */ + DocList *pRight, /* Doclist for the next word to the right */ + DocList *pOut /* Write the combined doclist here */ +){ + DocListReader left, right; + sqlite_int64 docidLeft, docidRight, priorLeft; + + readerInit(&left, pLeft); + readerInit(&right, pRight); + docidLeft = nextDocid(&left); + docidRight = nextDocid(&right); + + while( docidLeft>0 && docidRight>0 ){ + priorLeft = docidLeft; + if( docidLeft0 && docidRight<=priorLeft ){ + docidRight = nextDocid(&right); + } + } + while( docidLeft>0 ){ + docListAddDocid(pOut, docidLeft); + docidLeft = nextDocid(&left); + } +} + +static char *string_dup_n(const char *s, int n){ + char *str = malloc(n + 1); + memcpy(str, s, n); + str[n] = '\0'; + return str; +} + +/* Duplicate a string; the caller must free() the returned string. + * (We don't use strdup() since it's not part of the standard C library and + * may not be available everywhere.) */ +static char *string_dup(const char *s){ + return string_dup_n(s, strlen(s)); +} + +/* Format a string, replacing each occurrence of the % character with + * zDb.zName. This may be more convenient than sqlite_mprintf() + * when one string is used repeatedly in a format string. + * The caller must free() the returned string. */ +static char *string_format(const char *zFormat, + const char *zDb, const char *zName){ + const char *p; + size_t len = 0; + size_t nDb = strlen(zDb); + size_t nName = strlen(zName); + size_t nFullTableName = nDb+1+nName; + char *result; + char *r; + + /* first compute length needed */ + for(p = zFormat ; *p ; ++p){ + len += (*p=='%' ? 
nFullTableName : 1); + } + len += 1; /* for null terminator */ + + r = result = malloc(len); + for(p = zFormat; *p; ++p){ + if( *p=='%' ){ + memcpy(r, zDb, nDb); + r += nDb; + *r++ = '.'; + memcpy(r, zName, nName); + r += nName; + } else { + *r++ = *p; + } + } + *r++ = '\0'; + assert( r == result + len ); + return result; +} + +static int sql_exec(sqlite3 *db, const char *zDb, const char *zName, + const char *zFormat){ + char *zCommand = string_format(zFormat, zDb, zName); + int rc; + TRACE(("FTS1 sql: %s\n", zCommand)); + rc = sqlite3_exec(db, zCommand, NULL, 0, NULL); + free(zCommand); + return rc; +} + +static int sql_prepare(sqlite3 *db, const char *zDb, const char *zName, + sqlite3_stmt **ppStmt, const char *zFormat){ + char *zCommand = string_format(zFormat, zDb, zName); + int rc; + TRACE(("FTS1 prepare: %s\n", zCommand)); + rc = sqlite3_prepare(db, zCommand, -1, ppStmt, NULL); + free(zCommand); + return rc; +} + +/* end utility functions */ + +/* Forward reference */ +typedef struct fulltext_vtab fulltext_vtab; + +/* A single term in a query is represented by an instances of +** the following structure. +*/ +typedef struct QueryTerm { + short int nPhrase; /* How many following terms are part of the same phrase */ + short int iPhrase; /* This is the i-th term of a phrase. */ + short int iColumn; /* Column of the index that must match this term */ + signed char isOr; /* this term is preceded by "OR" */ + signed char isNot; /* this term is preceded by "-" */ + char *pTerm; /* text of the term. '\000' terminated. malloced */ + int nTerm; /* Number of bytes in pTerm[] */ +} QueryTerm; + + +/* A query string is parsed into a Query structure. + * + * We could, in theory, allow query strings to be complicated + * nested expressions with precedence determined by parentheses. + * But none of the major search engines do this. (Perhaps the + * feeling is that an parenthesized expression is two complex of + * an idea for the average user to grasp.) Taking our lead from + * the major search engines, we will allow queries to be a list + * of terms (with an implied AND operator) or phrases in double-quotes, + * with a single optional "-" before each non-phrase term to designate + * negation and an optional OR connector. + * + * OR binds more tightly than the implied AND, which is what the + * major search engines seem to do. So, for example: + * + * [one two OR three] ==> one AND (two OR three) + * [one OR two three] ==> (one OR two) AND three + * + * A "-" before a term matches all entries that lack that term. + * The "-" must occur immediately before the term with in intervening + * space. This is how the search engines do it. + * + * A NOT term cannot be the right-hand operand of an OR. If this + * occurs in the query string, the NOT is ignored: + * + * [one OR -two] ==> one OR two + * + */ +typedef struct Query { + fulltext_vtab *pFts; /* The full text index */ + int nTerms; /* Number of terms in the query */ + QueryTerm *pTerms; /* Array of terms. Space obtained from malloc() */ + int nextIsOr; /* Set the isOr flag on the next inserted term */ + int nextColumn; /* Next word parsed must be in this column */ + int dfltColumn; /* The default column */ +} Query; + + +/* +** An instance of the following structure keeps track of generated +** matching-word offset information and snippets. 
+*/ +typedef struct Snippet { + int nMatch; /* Total number of matches */ + int nAlloc; /* Space allocated for aMatch[] */ + struct snippetMatch { /* One entry for each matching term */ + char snStatus; /* Status flag for use while constructing snippets */ + short int iCol; /* The column that contains the match */ + short int iTerm; /* The index in Query.pTerms[] of the matching term */ + short int nByte; /* Number of bytes in the term */ + int iStart; /* The offset to the first character of the term */ + } *aMatch; /* Points to space obtained from malloc */ + char *zOffset; /* Text rendering of aMatch[] */ + int nOffset; /* strlen(zOffset) */ + char *zSnippet; /* Snippet text */ + int nSnippet; /* strlen(zSnippet) */ +} Snippet; + + +typedef enum QueryType { + QUERY_GENERIC, /* table scan */ + QUERY_ROWID, /* lookup by rowid */ + QUERY_FULLTEXT /* QUERY_FULLTEXT + [i] is a full-text search for column i*/ +} QueryType; + +/* TODO(shess) CHUNK_MAX controls how much data we allow in segment 0 +** before we start aggregating into larger segments. Lower CHUNK_MAX +** means that for a given input we have more individual segments per +** term, which means more rows in the table and a bigger index (due to +** both more rows and bigger rowids). But it also reduces the average +** cost of adding new elements to the segment 0 doclist, and it seems +** to reduce the number of pages read and written during inserts. 256 +** was chosen by measuring insertion times for a certain input (first +** 10k documents of Enron corpus), though including query performance +** in the decision may argue for a larger value. +*/ +#define CHUNK_MAX 256 + +typedef enum fulltext_statement { + CONTENT_INSERT_STMT, + CONTENT_SELECT_STMT, + CONTENT_UPDATE_STMT, + CONTENT_DELETE_STMT, + + TERM_SELECT_STMT, + TERM_SELECT_ALL_STMT, + TERM_INSERT_STMT, + TERM_UPDATE_STMT, + TERM_DELETE_STMT, + + MAX_STMT /* Always at end! */ +} fulltext_statement; + +/* These must exactly match the enum above. */ +/* TODO(adam): Is there some risk that a statement (in particular, +** pTermSelectStmt) will be used in two cursors at once, e.g. if a +** query joins a virtual table to itself? If so perhaps we should +** move some of these to the cursor object. +*/ +static const char *const fulltext_zStatement[MAX_STMT] = { + /* CONTENT_INSERT */ NULL, /* generated in contentInsertStatement() */ + /* CONTENT_SELECT */ "select * from %_content where rowid = ?", + /* CONTENT_UPDATE */ NULL, /* generated in contentUpdateStatement() */ + /* CONTENT_DELETE */ "delete from %_content where rowid = ?", + + /* TERM_SELECT */ + "select rowid, doclist from %_term where term = ? and segment = ?", + /* TERM_SELECT_ALL */ + "select doclist from %_term where term = ? order by segment", + /* TERM_INSERT */ + "insert into %_term (rowid, term, segment, doclist) values (?, ?, ?, ?)", + /* TERM_UPDATE */ "update %_term set doclist = ? where rowid = ?", + /* TERM_DELETE */ "delete from %_term where rowid = ?", +}; + +/* +** A connection to a fulltext index is an instance of the following +** structure. The xCreate and xConnect methods create an instance +** of this structure and xDestroy and xDisconnect free that instance. +** All other methods receive a pointer to the structure as one of their +** arguments. 
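+**
+** The structure owns the tokenizer and the cache of prepared
+** statements in pFulltextStatements[]; both are released by
+** fulltext_vtab_destroy().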
+*/ +struct fulltext_vtab { + sqlite3_vtab base; /* Base class used by SQLite core */ + sqlite3 *db; /* The database connection */ + const char *zDb; /* logical database name */ + const char *zName; /* virtual table name */ + int nColumn; /* number of columns in virtual table */ + char **azColumn; /* column names. malloced */ + char **azContentColumn; /* column names in content table; malloced */ + sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */ + + /* Precompiled statements which we keep as long as the table is + ** open. + */ + sqlite3_stmt *pFulltextStatements[MAX_STMT]; +}; + +/* +** When the core wants to do a query, it create a cursor using a +** call to xOpen. This structure is an instance of a cursor. It +** is destroyed by xClose. +*/ +typedef struct fulltext_cursor { + sqlite3_vtab_cursor base; /* Base class used by SQLite core */ + QueryType iCursorType; /* Copy of sqlite3_index_info.idxNum */ + sqlite3_stmt *pStmt; /* Prepared statement in use by the cursor */ + int eof; /* True if at End Of Results */ + Query q; /* Parsed query string */ + Snippet snippet; /* Cached snippet for the current row */ + int iColumn; /* Column being searched */ + DocListReader result; /* used when iCursorType == QUERY_FULLTEXT */ +} fulltext_cursor; + +static struct fulltext_vtab *cursor_vtab(fulltext_cursor *c){ + return (fulltext_vtab *) c->base.pVtab; +} + +static const sqlite3_module fulltextModule; /* forward declaration */ + +/* Append a list of strings separated by commas to a StringBuffer. */ +static void appendList(StringBuffer *sb, int nString, char **azString){ + int i; + for(i=0; i0 ) append(sb, ", "); + append(sb, azString[i]); + } +} + +/* Return a dynamically generated statement of the form + * insert into %_content (rowid, ...) values (?, ...) + */ +static const char *contentInsertStatement(fulltext_vtab *v){ + StringBuffer sb; + int i; + + initStringBuffer(&sb); + append(&sb, "insert into %_content (rowid, "); + appendList(&sb, v->nColumn, v->azContentColumn); + append(&sb, ") values (?"); + for(i=0; inColumn; ++i) + append(&sb, ", ?"); + append(&sb, ")"); + return sb.s; +} + +/* Return a dynamically generated statement of the form + * update %_content set [col_0] = ?, [col_1] = ?, ... + * where rowid = ? + */ +static const char *contentUpdateStatement(fulltext_vtab *v){ + StringBuffer sb; + int i; + + initStringBuffer(&sb); + append(&sb, "update %_content set "); + for(i=0; inColumn; ++i) { + if( i>0 ){ + append(&sb, ", "); + } + append(&sb, v->azContentColumn[i]); + append(&sb, " = ?"); + } + append(&sb, " where rowid = ?"); + return sb.s; +} + +/* Puts a freshly-prepared statement determined by iStmt in *ppStmt. +** If the indicated statement has never been prepared, it is prepared +** and cached, otherwise the cached version is reset. 
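+**
+** The usual calling pattern, as in content_insert() below, is
+** sql_get_statement(), then bind the parameters, then
+** sql_single_step_statement() (or sql_step_statement() when rows are
+** expected back).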
+*/ +static int sql_get_statement(fulltext_vtab *v, fulltext_statement iStmt, + sqlite3_stmt **ppStmt){ + assert( iStmtpFulltextStatements[iStmt]==NULL ){ + const char *zStmt; + int rc; + switch( iStmt ){ + case CONTENT_INSERT_STMT: + zStmt = contentInsertStatement(v); break; + case CONTENT_UPDATE_STMT: + zStmt = contentUpdateStatement(v); break; + default: + zStmt = fulltext_zStatement[iStmt]; + } + rc = sql_prepare(v->db, v->zDb, v->zName, &v->pFulltextStatements[iStmt], + zStmt); + if( zStmt != fulltext_zStatement[iStmt]) free((void *) zStmt); + if( rc!=SQLITE_OK ) return rc; + } else { + int rc = sqlite3_reset(v->pFulltextStatements[iStmt]); + if( rc!=SQLITE_OK ) return rc; + } + + *ppStmt = v->pFulltextStatements[iStmt]; + return SQLITE_OK; +} + +/* Step the indicated statement, handling errors SQLITE_BUSY (by +** retrying) and SQLITE_SCHEMA (by re-preparing and transferring +** bindings to the new statement). +** TODO(adam): We should extend this function so that it can work with +** statements declared locally, not only globally cached statements. +*/ +static int sql_step_statement(fulltext_vtab *v, fulltext_statement iStmt, + sqlite3_stmt **ppStmt){ + int rc; + sqlite3_stmt *s = *ppStmt; + assert( iStmtpFulltextStatements[iStmt] ); + + while( (rc=sqlite3_step(s))!=SQLITE_DONE && rc!=SQLITE_ROW ){ + if( rc==SQLITE_BUSY ) continue; + if( rc!=SQLITE_ERROR ) return rc; + + /* If an SQLITE_SCHEMA error has occured, then finalizing this + * statement is going to delete the fulltext_vtab structure. If + * the statement just executed is in the pFulltextStatements[] + * array, it will be finalized twice. So remove it before + * calling sqlite3_finalize(). + */ + v->pFulltextStatements[iStmt] = NULL; + rc = sqlite3_finalize(s); + break; + } + return rc; + + err: + sqlite3_finalize(s); + return rc; +} + +/* Like sql_step_statement(), but convert SQLITE_DONE to SQLITE_OK. +** Useful for statements like UPDATE, where we expect no results. +*/ +static int sql_single_step_statement(fulltext_vtab *v, + fulltext_statement iStmt, + sqlite3_stmt **ppStmt){ + int rc = sql_step_statement(v, iStmt, ppStmt); + return (rc==SQLITE_DONE) ? SQLITE_OK : rc; +} + +/* insert into %_content (rowid, ...) values ([rowid], [pValues]) */ +static int content_insert(fulltext_vtab *v, sqlite3_value *rowid, + sqlite3_value **pValues){ + sqlite3_stmt *s; + int i; + int rc = sql_get_statement(v, CONTENT_INSERT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_value(s, 1, rowid); + if( rc!=SQLITE_OK ) return rc; + + for(i=0; inColumn; ++i){ + rc = sqlite3_bind_value(s, 2+i, pValues[i]); + if( rc!=SQLITE_OK ) return rc; + } + + return sql_single_step_statement(v, CONTENT_INSERT_STMT, &s); +} + +/* update %_content set col0 = pValues[0], col1 = pValues[1], ... 
+ * where rowid = [iRowid] */ +static int content_update(fulltext_vtab *v, sqlite3_value **pValues, + sqlite_int64 iRowid){ + sqlite3_stmt *s; + int i; + int rc = sql_get_statement(v, CONTENT_UPDATE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + for(i=0; inColumn; ++i){ + rc = sqlite3_bind_value(s, 1+i, pValues[i]); + if( rc!=SQLITE_OK ) return rc; + } + + rc = sqlite3_bind_int64(s, 1+v->nColumn, iRowid); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step_statement(v, CONTENT_UPDATE_STMT, &s); +} + +static void freeStringArray(int nString, const char **pString){ + int i; + + for (i=0 ; i < nString ; ++i) { + if( pString[i]!=NULL ) free((void *) pString[i]); + } + free((void *) pString); +} + +/* select * from %_content where rowid = [iRow] + * The caller must delete the returned array and all strings in it. + * null fields will be NULL in the returned array. + * + * TODO: Perhaps we should return pointer/length strings here for consistency + * with other code which uses pointer/length. */ +static int content_select(fulltext_vtab *v, sqlite_int64 iRow, + const char ***pValues){ + sqlite3_stmt *s; + const char **values; + int i; + int rc; + + *pValues = NULL; + + rc = sql_get_statement(v, CONTENT_SELECT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iRow); + if( rc!=SQLITE_OK ) return rc; + + rc = sql_step_statement(v, CONTENT_SELECT_STMT, &s); + if( rc!=SQLITE_ROW ) return rc; + + values = (const char **) malloc(v->nColumn * sizeof(const char *)); + for(i=0; inColumn; ++i){ + if( sqlite3_column_type(s, i)==SQLITE_NULL ){ + values[i] = NULL; + }else{ + values[i] = string_dup((char*)sqlite3_column_text(s, i)); + } + } + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ){ + *pValues = values; + return SQLITE_OK; + } + + freeStringArray(v->nColumn, values); + return rc; +} + +/* delete from %_content where rowid = [iRow ] */ +static int content_delete(fulltext_vtab *v, sqlite_int64 iRow){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, CONTENT_DELETE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iRow); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step_statement(v, CONTENT_DELETE_STMT, &s); +} + +/* select rowid, doclist from %_term + * where term = [pTerm] and segment = [iSegment] + * If found, returns SQLITE_ROW; the caller must free the + * returned doclist. If no rows found, returns SQLITE_DONE. */ +static int term_select(fulltext_vtab *v, const char *pTerm, int nTerm, + int iSegment, + sqlite_int64 *rowid, DocList *out){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, TERM_SELECT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_text(s, 1, pTerm, nTerm, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 2, iSegment); + if( rc!=SQLITE_OK ) return rc; + + rc = sql_step_statement(v, TERM_SELECT_STMT, &s); + if( rc!=SQLITE_ROW ) return rc; + + *rowid = sqlite3_column_int64(s, 0); + docListInit(out, DL_DEFAULT, + sqlite3_column_blob(s, 1), sqlite3_column_bytes(s, 1)); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + rc = sqlite3_step(s); + return rc==SQLITE_DONE ? SQLITE_ROW : rc; +} + +/* Load the segment doclists for term pTerm and merge them in +** appropriate order into out. Returns SQLITE_OK if successful. 
If +** there are no segments for pTerm, successfully returns an empty +** doclist in out. +** +** Each document consists of 1 or more "columns". The number of +** columns is v->nColumn. If iColumn==v->nColumn, then return +** position information about all columns. If iColumnnColumn, +** then only return position information about the iColumn-th column +** (where the first column is 0). +*/ +static int term_select_all( + fulltext_vtab *v, /* The fulltext index we are querying against */ + int iColumn, /* If nColumn ){ /* querying a single column */ + docListRestrictColumn(&old, iColumn); + } + + /* doclist contains the newer data, so write it over old. Then + ** steal accumulated result for doclist. + */ + docListAccumulate(&old, &doclist); + docListDestroy(&doclist); + doclist = old; + } + if( rc!=SQLITE_DONE ){ + docListDestroy(&doclist); + return rc; + } + + docListDiscardEmpty(&doclist); + *out = doclist; + return SQLITE_OK; +} + +/* insert into %_term (rowid, term, segment, doclist) + values ([piRowid], [pTerm], [iSegment], [doclist]) +** Lets sqlite select rowid if piRowid is NULL, else uses *piRowid. +** +** NOTE(shess) piRowid is IN, with values of "space of int64" plus +** null, it is not used to pass data back to the caller. +*/ +static int term_insert(fulltext_vtab *v, sqlite_int64 *piRowid, + const char *pTerm, int nTerm, + int iSegment, DocList *doclist){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, TERM_INSERT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + if( piRowid==NULL ){ + rc = sqlite3_bind_null(s, 1); + }else{ + rc = sqlite3_bind_int64(s, 1, *piRowid); + } + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_text(s, 2, pTerm, nTerm, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 3, iSegment); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_blob(s, 4, doclist->pData, doclist->nData, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step_statement(v, TERM_INSERT_STMT, &s); +} + +/* update %_term set doclist = [doclist] where rowid = [rowid] */ +static int term_update(fulltext_vtab *v, sqlite_int64 rowid, + DocList *doclist){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, TERM_UPDATE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_blob(s, 1, doclist->pData, doclist->nData, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 2, rowid); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step_statement(v, TERM_UPDATE_STMT, &s); +} + +static int term_delete(fulltext_vtab *v, sqlite_int64 rowid){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, TERM_DELETE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, rowid); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step_statement(v, TERM_DELETE_STMT, &s); +} + +/* +** Free the memory used to contain a fulltext_vtab structure. +*/ +static void fulltext_vtab_destroy(fulltext_vtab *v){ + int iStmt, i; + + TRACE(("FTS1 Destroy %p\n", v)); + for( iStmt=0; iStmtpFulltextStatements[iStmt]!=NULL ){ + sqlite3_finalize(v->pFulltextStatements[iStmt]); + v->pFulltextStatements[iStmt] = NULL; + } + } + + if( v->pTokenizer!=NULL ){ + v->pTokenizer->pModule->xDestroy(v->pTokenizer); + v->pTokenizer = NULL; + } + + free(v->azColumn); + for(i = 0; i < v->nColumn; ++i) { + sqlite3_free(v->azContentColumn[i]); + } + free(v->azContentColumn); + free(v); +} + +/* +** Token types for parsing the arguments to xConnect or xCreate. 
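+**
+** For example, in the argument "tokenize porter" the two words are
+** TOKEN_ID tokens separated by a TOKEN_SPACE, while a quoted name
+** such as 'my col' comes back as a single TOKEN_STRING token.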
+*/ +#define TOKEN_EOF 0 /* End of file */ +#define TOKEN_SPACE 1 /* Any kind of whitespace */ +#define TOKEN_ID 2 /* An identifier */ +#define TOKEN_STRING 3 /* A string literal */ +#define TOKEN_PUNCT 4 /* A single punctuation character */ + +/* +** If X is a character that can be used in an identifier then +** IdChar(X) will be true. Otherwise it is false. +** +** For ASCII, any character with the high-order bit set is +** allowed in an identifier. For 7-bit characters, +** sqlite3IsIdChar[X] must be 1. +** +** Ticket #1066. the SQL standard does not allow '$' in the +** middle of identfiers. But many SQL implementations do. +** SQLite will allow '$' in identifiers for compatibility. +** But the feature is undocumented. +*/ +static const char isIdChar[] = { +/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ + 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ +}; +#define IdChar(C) (((c=C)&0x80)!=0 || (c>0x1f && isIdChar[c-0x20])) + + +/* +** Return the length of the token that begins at z[0]. +** Store the token type in *tokenType before returning. +*/ +static int getToken(const char *z, int *tokenType){ + int i, c; + switch( *z ){ + case 0: { + *tokenType = TOKEN_EOF; + return 0; + } + case ' ': case '\t': case '\n': case '\f': case '\r': { + for(i=1; safe_isspace(z[i]); i++){} + *tokenType = TOKEN_SPACE; + return i; + } + case '`': + case '\'': + case '"': { + int delim = z[0]; + for(i=1; (c=z[i])!=0; i++){ + if( c==delim ){ + if( z[i+1]==delim ){ + i++; + }else{ + break; + } + } + } + *tokenType = TOKEN_STRING; + return i + (c!=0); + } + case '[': { + for(i=1, c=z[0]; c!=']' && (c=z[i])!=0; i++){} + *tokenType = TOKEN_ID; + return i; + } + default: { + if( !IdChar(*z) ){ + break; + } + for(i=1; IdChar(z[i]); i++){} + *tokenType = TOKEN_ID; + return i; + } + } + *tokenType = TOKEN_PUNCT; + return 1; +} + +/* +** A token extracted from a string is an instance of the following +** structure. +*/ +typedef struct Token { + const char *z; /* Pointer to token text. Not '\000' terminated */ + short int n; /* Length of the token text in bytes. */ +} Token; + +/* +** Given a input string (which is really one of the argv[] parameters +** passed into xConnect or xCreate) split the string up into tokens. +** Return an array of pointers to '\000' terminated strings, one string +** for each non-whitespace token. +** +** The returned array is terminated by a single NULL pointer. +** +** Space to hold the returned array is obtained from a single +** malloc and should be freed by passing the return value to free(). +** The individual strings within the token list are all a part of +** the single memory allocation and will all be freed at once. 
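+**
+** For example, tokenizeString("tokenize porter", &n) sets n to 2 and
+** returns {"tokenize", "porter", NULL}.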
+*/ +static char **tokenizeString(const char *z, int *pnToken){ + int nToken = 0; + Token *aToken = malloc( strlen(z) * sizeof(aToken[0]) ); + int n = 1; + int e, i; + int totalSize = 0; + char **azToken; + char *zCopy; + while( n>0 ){ + n = getToken(z, &e); + if( e!=TOKEN_SPACE ){ + aToken[nToken].z = z; + aToken[nToken].n = n; + nToken++; + totalSize += n+1; + } + z += n; + } + azToken = (char**)malloc( nToken*sizeof(char*) + totalSize ); + zCopy = (char*)&azToken[nToken]; + nToken--; + for(i=0; i=0 ){ + azIn[j] = azIn[i]; + } + j++; + } + } + azIn[j] = 0; + } +} + + +/* +** Find the first alphanumeric token in the string zIn. Null-terminate +** this token. Remove any quotation marks. And return a pointer to +** the result. +*/ +static char *firstToken(char *zIn, char **pzTail){ + int n, ttype; + while(1){ + n = getToken(zIn, &ttype); + if( ttype==TOKEN_SPACE ){ + zIn += n; + }else if( ttype==TOKEN_EOF ){ + *pzTail = zIn; + return 0; + }else{ + zIn[n] = 0; + *pzTail = &zIn[1]; + dequoteString(zIn); + return zIn; + } + } + /*NOTREACHED*/ +} + +/* Return true if... +** +** * s begins with the string t, ignoring case +** * s is longer than t +** * The first character of s beyond t is not a alphanumeric +** +** Ignore leading space in *s. +** +** To put it another way, return true if the first token of +** s[] is t[]. +*/ +static int startsWith(const char *s, const char *t){ + while( safe_isspace(*s) ){ s++; } + while( *t ){ + if( safe_tolower(*s++)!=safe_tolower(*t++) ) return 0; + } + return *s!='_' && !safe_isalnum(*s); +} + +/* +** An instance of this structure defines the "spec" of a +** full text index. This structure is populated by parseSpec +** and use by fulltextConnect and fulltextCreate. +*/ +typedef struct TableSpec { + const char *zDb; /* Logical database name */ + const char *zName; /* Name of the full-text index */ + int nColumn; /* Number of columns to be indexed */ + char **azColumn; /* Original names of columns to be indexed */ + char **azContentColumn; /* Column names for %_content */ + char **azTokenizer; /* Name of tokenizer and its arguments */ +} TableSpec; + +/* +** Reclaim all of the memory used by a TableSpec +*/ +static void clearTableSpec(TableSpec *p) { + free(p->azColumn); + free(p->azContentColumn); + free(p->azTokenizer); +} + +/* Parse a CREATE VIRTUAL TABLE statement, which looks like this: + * + * CREATE VIRTUAL TABLE email + * USING fts1(subject, body, tokenize mytokenizer(myarg)) + * + * We return parsed information in a TableSpec structure. + * + */ +static int parseSpec(TableSpec *pSpec, int argc, const char *const*argv, + char**pzErr){ + int i, n; + char *z, *zDummy; + char **azArg; + const char *zTokenizer = 0; /* argv[] entry describing the tokenizer */ + + assert( argc>=3 ); + /* Current interface: + ** argv[0] - module name + ** argv[1] - database name + ** argv[2] - table name + ** argv[3..] - columns, optionally followed by tokenizer specification + ** and snippet delimiters specification. + */ + + /* Make a copy of the complete argv[][] array in a single allocation. + ** The argv[][] array is read-only and transient. We can write to the + ** copy in order to modify things and the copy is persistent. 
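+  **
+  ** For example, for CREATE VIRTUAL TABLE email USING fts1(subject, body)
+  ** on the main database, argv[] is {"fts1", "main", "email", "subject",
+  ** "body"} and argc is 5.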
+ */ + memset(pSpec, 0, sizeof(*pSpec)); + for(i=n=0; izDb = azArg[1]; + pSpec->zName = azArg[2]; + pSpec->nColumn = 0; + pSpec->azColumn = azArg; + zTokenizer = "tokenize simple"; + for(i=3; inColumn] = firstToken(azArg[i], &zDummy); + pSpec->nColumn++; + } + } + if( pSpec->nColumn==0 ){ + azArg[0] = "content"; + pSpec->nColumn = 1; + } + + /* + ** Construct the list of content column names. + ** + ** Each content column name will be of the form cNNAAAA + ** where NN is the column number and AAAA is the sanitized + ** column name. "sanitized" means that special characters are + ** converted to "_". The cNN prefix guarantees that all column + ** names are unique. + ** + ** The AAAA suffix is not strictly necessary. It is included + ** for the convenience of people who might examine the generated + ** %_content table and wonder what the columns are used for. + */ + pSpec->azContentColumn = malloc( pSpec->nColumn * sizeof(char *) ); + if( pSpec->azContentColumn==0 ){ + clearTableSpec(pSpec); + return SQLITE_NOMEM; + } + for(i=0; inColumn; i++){ + char *p; + pSpec->azContentColumn[i] = sqlite3_mprintf("c%d%s", i, azArg[i]); + for (p = pSpec->azContentColumn[i]; *p ; ++p) { + if( !safe_isalnum(*p) ) *p = '_'; + } + } + + /* + ** Parse the tokenizer specification string. + */ + pSpec->azTokenizer = tokenizeString(zTokenizer, &n); + tokenListToIdList(pSpec->azTokenizer); + + return SQLITE_OK; +} + +/* +** Generate a CREATE TABLE statement that describes the schema of +** the virtual table. Return a pointer to this schema string. +** +** Space is obtained from sqlite3_mprintf() and should be freed +** using sqlite3_free(). +*/ +static char *fulltextSchema( + int nColumn, /* Number of columns */ + const char *const* azColumn, /* List of columns */ + const char *zTableName /* Name of the table */ +){ + int i; + char *zSchema, *zNext; + const char *zSep = "("; + zSchema = sqlite3_mprintf("CREATE TABLE x"); + for(i=0; ibase */ + v->db = db; + v->zDb = spec->zDb; /* Freed when azColumn is freed */ + v->zName = spec->zName; /* Freed when azColumn is freed */ + v->nColumn = spec->nColumn; + v->azContentColumn = spec->azContentColumn; + spec->azContentColumn = 0; + v->azColumn = spec->azColumn; + spec->azColumn = 0; + + if( spec->azTokenizer==0 ){ + return SQLITE_NOMEM; + } + /* TODO(shess) For now, add new tokenizers as else if clauses. 
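+  ** The built-in choices handled below are the "simple" (default) and
+  ** "porter" tokenizers; anything else is reported as an error.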
*/ + if( spec->azTokenizer[0]==0 || startsWith(spec->azTokenizer[0], "simple") ){ + sqlite3Fts1SimpleTokenizerModule(&m); + }else if( startsWith(spec->azTokenizer[0], "porter") ){ + sqlite3Fts1PorterTokenizerModule(&m); + }else{ + *pzErr = sqlite3_mprintf("unknown tokenizer: %s", spec->azTokenizer[0]); + rc = SQLITE_ERROR; + goto err; + } + for(n=0; spec->azTokenizer[n]; n++){} + if( n ){ + rc = m->xCreate(n-1, (const char*const*)&spec->azTokenizer[1], + &v->pTokenizer); + }else{ + rc = m->xCreate(0, 0, &v->pTokenizer); + } + if( rc!=SQLITE_OK ) goto err; + v->pTokenizer->pModule = m; + + /* TODO: verify the existence of backing tables foo_content, foo_term */ + + schema = fulltextSchema(v->nColumn, (const char*const*)v->azColumn, + spec->zName); + rc = sqlite3_declare_vtab(db, schema); + sqlite3_free(schema); + if( rc!=SQLITE_OK ) goto err; + + memset(v->pFulltextStatements, 0, sizeof(v->pFulltextStatements)); + + *ppVTab = &v->base; + TRACE(("FTS1 Connect %p\n", v)); + + return rc; + +err: + fulltext_vtab_destroy(v); + return rc; +} + +static int fulltextConnect( + sqlite3 *db, + void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVTab, + char **pzErr +){ + TableSpec spec; + int rc = parseSpec(&spec, argc, argv, pzErr); + if( rc!=SQLITE_OK ) return rc; + + rc = constructVtab(db, &spec, ppVTab, pzErr); + clearTableSpec(&spec); + return rc; +} + + /* The %_content table holds the text of each document, with + ** the rowid used as the docid. + ** + ** The %_term table maps each term to a document list blob + ** containing elements sorted by ascending docid, each element + ** encoded as: + ** + ** docid varint-encoded + ** token elements: + ** position+1 varint-encoded as delta from previous position + ** start offset varint-encoded as delta from previous start offset + ** end offset varint-encoded as delta from start offset + ** + ** The sentinel position of 0 indicates the end of the token list. + ** + ** Additionally, doclist blobs are chunked into multiple segments, + ** using segment to order the segments. New elements are added to + ** the segment at segment 0, until it exceeds CHUNK_MAX. Then + ** segment 0 is deleted, and the doclist is inserted at segment 1. + ** If there is already a doclist at segment 1, the segment 0 doclist + ** is merged with it, the segment 1 doclist is deleted, and the + ** merged doclist is inserted at segment 2, repeating those + ** operations until an insert succeeds. + ** + ** Since this structure doesn't allow us to update elements in place + ** in case of deletion or update, these are simply written to + ** segment 0 (with an empty token list in case of deletion), with + ** docListAccumulate() taking care to retain lower-segment + ** information in preference to higher-segment information. + */ + /* TODO(shess) Provide a VACUUM type operation which both removes + ** deleted elements which are no longer necessary, and duplicated + ** elements. I suspect this will probably not be necessary in + ** practice, though. 
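+ **
+ ** For example, CREATE VIRTUAL TABLE email USING fts1(subject, body)
+ ** creates backing tables email_content(c0subject, c1body) and
+ ** email_term(term, segment, doclist).  When the segment 0 doclist
+ ** for a term grows past CHUNK_MAX while segments 1 and 2 are already
+ ** occupied, it is merged with the segment 1 doclist, the result is
+ ** merged with the segment 2 doclist, and the final doclist lands in
+ ** the empty segment 3, leaving segments 0 through 2 free.  The
+ ** effect is like carrying in a binary counter, so the number of
+ ** segments per term grows only logarithmically with the size of
+ ** that term's doclist.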
+ */ +static int fulltextCreate(sqlite3 *db, void *pAux, + int argc, const char * const *argv, + sqlite3_vtab **ppVTab, char **pzErr){ + int rc; + TableSpec spec; + StringBuffer schema; + TRACE(("FTS1 Create\n")); + + rc = parseSpec(&spec, argc, argv, pzErr); + if( rc!=SQLITE_OK ) return rc; + + initStringBuffer(&schema); + append(&schema, "CREATE TABLE %_content("); + appendList(&schema, spec.nColumn, spec.azContentColumn); + append(&schema, ")"); + rc = sql_exec(db, spec.zDb, spec.zName, schema.s); + free(schema.s); + if( rc!=SQLITE_OK ) goto out; + + rc = sql_exec(db, spec.zDb, spec.zName, + "create table %_term(term text, segment integer, doclist blob, " + "primary key(term, segment));"); + if( rc!=SQLITE_OK ) goto out; + + rc = constructVtab(db, &spec, ppVTab, pzErr); + +out: + clearTableSpec(&spec); + return rc; +} + +/* Decide how to handle an SQL query. */ +static int fulltextBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ + int i; + TRACE(("FTS1 BestIndex\n")); + + for(i=0; inConstraint; ++i){ + const struct sqlite3_index_constraint *pConstraint; + pConstraint = &pInfo->aConstraint[i]; + if( pConstraint->usable ) { + if( pConstraint->iColumn==-1 && + pConstraint->op==SQLITE_INDEX_CONSTRAINT_EQ ){ + pInfo->idxNum = QUERY_ROWID; /* lookup by rowid */ + TRACE(("FTS1 QUERY_ROWID\n")); + } else if( pConstraint->iColumn>=0 && + pConstraint->op==SQLITE_INDEX_CONSTRAINT_MATCH ){ + /* full-text search */ + pInfo->idxNum = QUERY_FULLTEXT + pConstraint->iColumn; + TRACE(("FTS1 QUERY_FULLTEXT %d\n", pConstraint->iColumn)); + } else continue; + + pInfo->aConstraintUsage[i].argvIndex = 1; + pInfo->aConstraintUsage[i].omit = 1; + + /* An arbitrary value for now. + * TODO: Perhaps rowid matches should be considered cheaper than + * full-text searches. */ + pInfo->estimatedCost = 1.0; + + return SQLITE_OK; + } + } + pInfo->idxNum = QUERY_GENERIC; + return SQLITE_OK; +} + +static int fulltextDisconnect(sqlite3_vtab *pVTab){ + TRACE(("FTS1 Disconnect %p\n", pVTab)); + fulltext_vtab_destroy((fulltext_vtab *)pVTab); + return SQLITE_OK; +} + +static int fulltextDestroy(sqlite3_vtab *pVTab){ + fulltext_vtab *v = (fulltext_vtab *)pVTab; + int rc; + + TRACE(("FTS1 Destroy %p\n", pVTab)); + rc = sql_exec(v->db, v->zDb, v->zName, + "drop table if exists %_content;" + "drop table if exists %_term;" + ); + if( rc!=SQLITE_OK ) return rc; + + fulltext_vtab_destroy((fulltext_vtab *)pVTab); + return SQLITE_OK; +} + +static int fulltextOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ + fulltext_cursor *c; + + c = (fulltext_cursor *) calloc(sizeof(fulltext_cursor), 1); + /* sqlite will initialize c->base */ + *ppCursor = &c->base; + TRACE(("FTS1 Open %p: %p\n", pVTab, c)); + + return SQLITE_OK; +} + + +/* Free all of the dynamically allocated memory held by *q +*/ +static void queryClear(Query *q){ + int i; + for(i = 0; i < q->nTerms; ++i){ + free(q->pTerms[i].pTerm); + } + free(q->pTerms); + memset(q, 0, sizeof(*q)); +} + +/* Free all of the dynamically allocated memory held by the +** Snippet +*/ +static void snippetClear(Snippet *p){ + free(p->aMatch); + free(p->zOffset); + free(p->zSnippet); + memset(p, 0, sizeof(*p)); +} +/* +** Append a single entry to the p->aMatch[] log. 
+*/ +static void snippetAppendMatch( + Snippet *p, /* Append the entry to this snippet */ + int iCol, int iTerm, /* The column and query term */ + int iStart, int nByte /* Offset and size of the match */ +){ + int i; + struct snippetMatch *pMatch; + if( p->nMatch+1>=p->nAlloc ){ + p->nAlloc = p->nAlloc*2 + 10; + p->aMatch = realloc(p->aMatch, p->nAlloc*sizeof(p->aMatch[0]) ); + if( p->aMatch==0 ){ + p->nMatch = 0; + p->nAlloc = 0; + return; + } + } + i = p->nMatch++; + pMatch = &p->aMatch[i]; + pMatch->iCol = iCol; + pMatch->iTerm = iTerm; + pMatch->iStart = iStart; + pMatch->nByte = nByte; +} + +/* +** Sizing information for the circular buffer used in snippetOffsetsOfColumn() +*/ +#define FTS1_ROTOR_SZ (32) +#define FTS1_ROTOR_MASK (FTS1_ROTOR_SZ-1) + +/* +** Add entries to pSnippet->aMatch[] for every match that occurs against +** document zDoc[0..nDoc-1] which is stored in column iColumn. +*/ +static void snippetOffsetsOfColumn( + Query *pQuery, + Snippet *pSnippet, + int iColumn, + const char *zDoc, + int nDoc +){ + const sqlite3_tokenizer_module *pTModule; /* The tokenizer module */ + sqlite3_tokenizer *pTokenizer; /* The specific tokenizer */ + sqlite3_tokenizer_cursor *pTCursor; /* Tokenizer cursor */ + fulltext_vtab *pVtab; /* The full text index */ + int nColumn; /* Number of columns in the index */ + const QueryTerm *aTerm; /* Query string terms */ + int nTerm; /* Number of query string terms */ + int i, j; /* Loop counters */ + int rc; /* Return code */ + unsigned int match, prevMatch; /* Phrase search bitmasks */ + const char *zToken; /* Next token from the tokenizer */ + int nToken; /* Size of zToken */ + int iBegin, iEnd, iPos; /* Offsets of beginning and end */ + + /* The following variables keep a circular buffer of the last + ** few tokens */ + unsigned int iRotor = 0; /* Index of current token */ + int iRotorBegin[FTS1_ROTOR_SZ]; /* Beginning offset of token */ + int iRotorLen[FTS1_ROTOR_SZ]; /* Length of token */ + + pVtab = pQuery->pFts; + nColumn = pVtab->nColumn; + pTokenizer = pVtab->pTokenizer; + pTModule = pTokenizer->pModule; + rc = pTModule->xOpen(pTokenizer, zDoc, nDoc, &pTCursor); + if( rc ) return; + pTCursor->pTokenizer = pTokenizer; + aTerm = pQuery->pTerms; + nTerm = pQuery->nTerms; + if( nTerm>=FTS1_ROTOR_SZ ){ + nTerm = FTS1_ROTOR_SZ - 1; + } + prevMatch = 0; + while(1){ + rc = pTModule->xNext(pTCursor, &zToken, &nToken, &iBegin, &iEnd, &iPos); + if( rc ) break; + iRotorBegin[iRotor&FTS1_ROTOR_MASK] = iBegin; + iRotorLen[iRotor&FTS1_ROTOR_MASK] = iEnd-iBegin; + match = 0; + for(i=0; i=0 && iCol1 && (prevMatch & (1<=0; j--){ + int k = (iRotor-j) & FTS1_ROTOR_MASK; + snippetAppendMatch(pSnippet, iColumn, i-j, + iRotorBegin[k], iRotorLen[k]); + } + } + } + prevMatch = match<<1; + iRotor++; + } + pTModule->xClose(pTCursor); +} + + +/* +** Compute all offsets for the current row of the query. +** If the offsets have already been computed, this routine is a no-op. 
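+**
+** For a full-text query restricted to one column only that column is
+** scanned; otherwise every column of the current row is passed to
+** snippetOffsetsOfColumn().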
+*/ +static void snippetAllOffsets(fulltext_cursor *p){ + int nColumn; + int iColumn, i; + int iFirst, iLast; + fulltext_vtab *pFts; + + if( p->snippet.nMatch ) return; + if( p->q.nTerms==0 ) return; + pFts = p->q.pFts; + nColumn = pFts->nColumn; + iColumn = p->iCursorType - QUERY_FULLTEXT; + if( iColumn<0 || iColumn>=nColumn ){ + iFirst = 0; + iLast = nColumn-1; + }else{ + iFirst = iColumn; + iLast = iColumn; + } + for(i=iFirst; i<=iLast; i++){ + const char *zDoc; + int nDoc; + zDoc = (const char*)sqlite3_column_text(p->pStmt, i+1); + nDoc = sqlite3_column_bytes(p->pStmt, i+1); + snippetOffsetsOfColumn(&p->q, &p->snippet, i, zDoc, nDoc); + } +} + +/* +** Convert the information in the aMatch[] array of the snippet +** into the string zOffset[0..nOffset-1]. +*/ +static void snippetOffsetText(Snippet *p){ + int i; + int cnt = 0; + StringBuffer sb; + char zBuf[200]; + if( p->zOffset ) return; + initStringBuffer(&sb); + for(i=0; inMatch; i++){ + struct snippetMatch *pMatch = &p->aMatch[i]; + zBuf[0] = ' '; + sprintf(&zBuf[cnt>0], "%d %d %d %d", pMatch->iCol, + pMatch->iTerm, pMatch->iStart, pMatch->nByte); + append(&sb, zBuf); + cnt++; + } + p->zOffset = sb.s; + p->nOffset = sb.len; +} + +/* +** zDoc[0..nDoc-1] is phrase of text. aMatch[0..nMatch-1] are a set +** of matching words some of which might be in zDoc. zDoc is column +** number iCol. +** +** iBreak is suggested spot in zDoc where we could begin or end an +** excerpt. Return a value similar to iBreak but possibly adjusted +** to be a little left or right so that the break point is better. +*/ +static int wordBoundary( + int iBreak, /* The suggested break point */ + const char *zDoc, /* Document text */ + int nDoc, /* Number of bytes in zDoc[] */ + struct snippetMatch *aMatch, /* Matching words */ + int nMatch, /* Number of entries in aMatch[] */ + int iCol /* The column number for zDoc[] */ +){ + int i; + if( iBreak<=10 ){ + return 0; + } + if( iBreak>=nDoc-10 ){ + return nDoc; + } + for(i=0; i0 && aMatch[i-1].iStart+aMatch[i-1].nByte>=iBreak ){ + return aMatch[i-1].iStart; + } + } + for(i=1; i<=10; i++){ + if( safe_isspace(zDoc[iBreak-i]) ){ + return iBreak - i + 1; + } + if( safe_isspace(zDoc[iBreak+i]) ){ + return iBreak + i + 1; + } + } + return iBreak; +} + +/* +** If the StringBuffer does not end in white space, add a single +** space character to the end. +*/ +static void appendWhiteSpace(StringBuffer *p){ + if( p->len==0 ) return; + if( safe_isspace(p->s[p->len-1]) ) return; + append(p, " "); +} + +/* +** Remove white space from teh end of the StringBuffer +*/ +static void trimWhiteSpace(StringBuffer *p){ + while( p->len>0 && safe_isspace(p->s[p->len-1]) ){ + p->len--; + } +} + + + +/* +** Allowed values for Snippet.aMatch[].snStatus +*/ +#define SNIPPET_IGNORE 0 /* It is ok to omit this match from the snippet */ +#define SNIPPET_DESIRED 1 /* We want to include this match in the snippet */ + +/* +** Generate the text of a snippet. 
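+**
+** Each desired match is shown with roughly 40 bytes of context on
+** either side, trimmed to word boundaries by wordBoundary().  The
+** match itself is wrapped in zStartMark/zEndMark, and non-adjacent
+** excerpts are separated by zEllipsis.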
+*/ +static void snippetText( + fulltext_cursor *pCursor, /* The cursor we need the snippet for */ + const char *zStartMark, /* Markup to appear before each match */ + const char *zEndMark, /* Markup to appear after each match */ + const char *zEllipsis /* Ellipsis mark */ +){ + int i, j; + struct snippetMatch *aMatch; + int nMatch; + int nDesired; + StringBuffer sb; + int tailCol; + int tailOffset; + int iCol; + int nDoc; + const char *zDoc; + int iStart, iEnd; + int tailEllipsis = 0; + int iMatch; + + + free(pCursor->snippet.zSnippet); + pCursor->snippet.zSnippet = 0; + aMatch = pCursor->snippet.aMatch; + nMatch = pCursor->snippet.nMatch; + initStringBuffer(&sb); + + for(i=0; iq.nTerms; i++){ + for(j=0; j0; i++){ + if( aMatch[i].snStatus!=SNIPPET_DESIRED ) continue; + nDesired--; + iCol = aMatch[i].iCol; + zDoc = (const char*)sqlite3_column_text(pCursor->pStmt, iCol+1); + nDoc = sqlite3_column_bytes(pCursor->pStmt, iCol+1); + iStart = aMatch[i].iStart - 40; + iStart = wordBoundary(iStart, zDoc, nDoc, aMatch, nMatch, iCol); + if( iStart<=10 ){ + iStart = 0; + } + if( iCol==tailCol && iStart<=tailOffset+20 ){ + iStart = tailOffset; + } + if( (iCol!=tailCol && tailCol>=0) || iStart!=tailOffset ){ + trimWhiteSpace(&sb); + appendWhiteSpace(&sb); + append(&sb, zEllipsis); + appendWhiteSpace(&sb); + } + iEnd = aMatch[i].iStart + aMatch[i].nByte + 40; + iEnd = wordBoundary(iEnd, zDoc, nDoc, aMatch, nMatch, iCol); + if( iEnd>=nDoc-10 ){ + iEnd = nDoc; + tailEllipsis = 0; + }else{ + tailEllipsis = 1; + } + while( iMatchsnippet.zSnippet = sb.s; + pCursor->snippet.nSnippet = sb.len; +} + + +/* +** Close the cursor. For additional information see the documentation +** on the xClose method of the virtual table interface. +*/ +static int fulltextClose(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + TRACE(("FTS1 Close %p\n", c)); + sqlite3_finalize(c->pStmt); + queryClear(&c->q); + snippetClear(&c->snippet); + if( c->result.pDoclist!=NULL ){ + docListDelete(c->result.pDoclist); + } + free(c); + return SQLITE_OK; +} + +static int fulltextNext(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + sqlite_int64 iDocid; + int rc; + + TRACE(("FTS1 Next %p\n", pCursor)); + snippetClear(&c->snippet); + if( c->iCursorType < QUERY_FULLTEXT ){ + /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ + rc = sqlite3_step(c->pStmt); + switch( rc ){ + case SQLITE_ROW: + c->eof = 0; + return SQLITE_OK; + case SQLITE_DONE: + c->eof = 1; + return SQLITE_OK; + default: + c->eof = 1; + return rc; + } + } else { /* full-text query */ + rc = sqlite3_reset(c->pStmt); + if( rc!=SQLITE_OK ) return rc; + + iDocid = nextDocid(&c->result); + if( iDocid==0 ){ + c->eof = 1; + return SQLITE_OK; + } + rc = sqlite3_bind_int64(c->pStmt, 1, iDocid); + if( rc!=SQLITE_OK ) return rc; + /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ + rc = sqlite3_step(c->pStmt); + if( rc==SQLITE_ROW ){ /* the case we expect */ + c->eof = 0; + return SQLITE_OK; + } + /* an error occurred; abort */ + return rc==SQLITE_DONE ? SQLITE_ERROR : rc; + } +} + + +/* Return a DocList corresponding to the query term *pTerm. If *pTerm +** is the first term of a phrase query, go ahead and evaluate the phrase +** query and return the doclist for the entire phrase query. +** +** The result is stored in pTerm->doclist. +*/ +static int docListOfTerm( + fulltext_vtab *v, /* The full text index */ + int iColumn, /* column to restrict to. 
No restrition if >=nColumn */ + QueryTerm *pQTerm, /* Term we are looking for, or 1st term of a phrase */ + DocList **ppResult /* Write the result here */ +){ + DocList *pLeft, *pRight, *pNew; + int i, rc; + + pLeft = docListNew(DL_POSITIONS); + rc = term_select_all(v, iColumn, pQTerm->pTerm, pQTerm->nTerm, pLeft); + if( rc ){ + docListDelete(pLeft); + return rc; + } + for(i=1; i<=pQTerm->nPhrase; i++){ + pRight = docListNew(DL_POSITIONS); + rc = term_select_all(v, iColumn, pQTerm[i].pTerm, pQTerm[i].nTerm, pRight); + if( rc ){ + docListDelete(pLeft); + return rc; + } + pNew = docListNew(inPhrase ? DL_POSITIONS : DL_DOCIDS); + docListPhraseMerge(pLeft, pRight, pNew); + docListDelete(pLeft); + docListDelete(pRight); + pLeft = pNew; + } + *ppResult = pLeft; + return SQLITE_OK; +} + +/* Add a new term pTerm[0..nTerm-1] to the query *q. +*/ +static void queryAdd(Query *q, const char *pTerm, int nTerm){ + QueryTerm *t; + ++q->nTerms; + q->pTerms = realloc(q->pTerms, q->nTerms * sizeof(q->pTerms[0])); + if( q->pTerms==0 ){ + q->nTerms = 0; + return; + } + t = &q->pTerms[q->nTerms - 1]; + memset(t, 0, sizeof(*t)); + t->pTerm = malloc(nTerm+1); + memcpy(t->pTerm, pTerm, nTerm); + t->pTerm[nTerm] = 0; + t->nTerm = nTerm; + t->isOr = q->nextIsOr; + q->nextIsOr = 0; + t->iColumn = q->nextColumn; + q->nextColumn = q->dfltColumn; +} + +/* +** Check to see if the string zToken[0...nToken-1] matches any +** column name in the virtual table. If it does, +** return the zero-indexed column number. If not, return -1. +*/ +static int checkColumnSpecifier( + fulltext_vtab *pVtab, /* The virtual table */ + const char *zToken, /* Text of the token */ + int nToken /* Number of characters in the token */ +){ + int i; + for(i=0; inColumn; i++){ + if( memcmp(pVtab->azColumn[i], zToken, nToken)==0 + && pVtab->azColumn[i][nToken]==0 ){ + return i; + } + } + return -1; +} + +/* +** Parse the text at pSegment[0..nSegment-1]. Add additional terms +** to the query being assemblied in pQuery. +** +** inPhrase is true if pSegment[0..nSegement-1] is contained within +** double-quotes. If inPhrase is true, then the first term +** is marked with the number of terms in the phrase less one and +** OR and "-" syntax is ignored. If inPhrase is false, then every +** term found is marked with nPhrase=0 and OR and "-" syntax is significant. +*/ +static int tokenizeSegment( + sqlite3_tokenizer *pTokenizer, /* The tokenizer to use */ + const char *pSegment, int nSegment, /* Query expression being parsed */ + int inPhrase, /* True if within "..." 
*/ + Query *pQuery /* Append results here */ +){ + const sqlite3_tokenizer_module *pModule = pTokenizer->pModule; + sqlite3_tokenizer_cursor *pCursor; + int firstIndex = pQuery->nTerms; + int iCol; + int nTerm = 1; + + int rc = pModule->xOpen(pTokenizer, pSegment, nSegment, &pCursor); + if( rc!=SQLITE_OK ) return rc; + pCursor->pTokenizer = pTokenizer; + + while( 1 ){ + const char *pToken; + int nToken, iBegin, iEnd, iPos; + + rc = pModule->xNext(pCursor, + &pToken, &nToken, + &iBegin, &iEnd, &iPos); + if( rc!=SQLITE_OK ) break; + if( !inPhrase && + pSegment[iEnd]==':' && + (iCol = checkColumnSpecifier(pQuery->pFts, pToken, nToken))>=0 ){ + pQuery->nextColumn = iCol; + continue; + } + if( !inPhrase && pQuery->nTerms>0 && nToken==2 + && pSegment[iBegin]=='O' && pSegment[iBegin+1]=='R' ){ + pQuery->nextIsOr = 1; + continue; + } + queryAdd(pQuery, pToken, nToken); + if( !inPhrase && iBegin>0 && pSegment[iBegin-1]=='-' ){ + pQuery->pTerms[pQuery->nTerms-1].isNot = 1; + } + pQuery->pTerms[pQuery->nTerms-1].iPhrase = nTerm; + if( inPhrase ){ + nTerm++; + } + } + + if( inPhrase && pQuery->nTerms>firstIndex ){ + pQuery->pTerms[firstIndex].nPhrase = pQuery->nTerms - firstIndex - 1; + } + + return pModule->xClose(pCursor); +} + +/* Parse a query string, yielding a Query object pQuery. +** +** The calling function will need to queryClear() to clean up +** the dynamically allocated memory held by pQuery. +*/ +static int parseQuery( + fulltext_vtab *v, /* The fulltext index */ + const char *zInput, /* Input text of the query string */ + int nInput, /* Size of the input text */ + int dfltColumn, /* Default column of the index to match against */ + Query *pQuery /* Write the parse results here. */ +){ + int iInput, inPhrase = 0; + + if( zInput==0 ) nInput = 0; + if( nInput<0 ) nInput = strlen(zInput); + pQuery->nTerms = 0; + pQuery->pTerms = NULL; + pQuery->nextIsOr = 0; + pQuery->nextColumn = dfltColumn; + pQuery->dfltColumn = dfltColumn; + pQuery->pFts = v; + + for(iInput=0; iInputiInput ){ + tokenizeSegment(v->pTokenizer, zInput+iInput, i-iInput, inPhrase, + pQuery); + } + iInput = i; + if( i=nColumn +** they are allowed to match against any column. +*/ +static int fulltextQuery( + fulltext_vtab *v, /* The full text index */ + int iColumn, /* Match against this column by default */ + const char *zInput, /* The query string */ + int nInput, /* Number of bytes in zInput[] */ + DocList **pResult, /* Write the result doclist here */ + Query *pQuery /* Put parsed query string here */ +){ + int i, iNext, rc; + DocList *pLeft = NULL; + DocList *pRight, *pNew, *pOr; + int nNot = 0; + QueryTerm *aTerm; + + rc = parseQuery(v, zInput, nInput, iColumn, pQuery); + if( rc!=SQLITE_OK ) return rc; + + /* Merge AND terms. 
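+  ** A run of OR-connected terms is first combined into a single
+  ** doclist with docListOrMerge(), and that doclist is then ANDed
+  ** into the running result with docListAndMerge().  NOT terms are
+  ** skipped here and applied afterwards with docListExceptMerge().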
*/ + aTerm = pQuery->pTerms; + for(i = 0; inTerms; i=iNext){ + if( aTerm[i].isNot ){ + /* Handle all NOT terms in a separate pass */ + nNot++; + iNext = i + aTerm[i].nPhrase+1; + continue; + } + iNext = i + aTerm[i].nPhrase + 1; + rc = docListOfTerm(v, aTerm[i].iColumn, &aTerm[i], &pRight); + if( rc ){ + queryClear(pQuery); + return rc; + } + while( iNextnTerms && aTerm[iNext].isOr ){ + rc = docListOfTerm(v, aTerm[iNext].iColumn, &aTerm[iNext], &pOr); + iNext += aTerm[iNext].nPhrase + 1; + if( rc ){ + queryClear(pQuery); + return rc; + } + pNew = docListNew(DL_DOCIDS); + docListOrMerge(pRight, pOr, pNew); + docListDelete(pRight); + docListDelete(pOr); + pRight = pNew; + } + if( pLeft==0 ){ + pLeft = pRight; + }else{ + pNew = docListNew(DL_DOCIDS); + docListAndMerge(pLeft, pRight, pNew); + docListDelete(pRight); + docListDelete(pLeft); + pLeft = pNew; + } + } + + if( nNot && pLeft==0 ){ + /* We do not yet know how to handle a query of only NOT terms */ + return SQLITE_ERROR; + } + + /* Do the EXCEPT terms */ + for(i=0; inTerms; i += aTerm[i].nPhrase + 1){ + if( !aTerm[i].isNot ) continue; + rc = docListOfTerm(v, aTerm[i].iColumn, &aTerm[i], &pRight); + if( rc ){ + queryClear(pQuery); + docListDelete(pLeft); + return rc; + } + pNew = docListNew(DL_DOCIDS); + docListExceptMerge(pLeft, pRight, pNew); + docListDelete(pRight); + docListDelete(pLeft); + pLeft = pNew; + } + + *pResult = pLeft; + return rc; +} + +/* +** This is the xFilter interface for the virtual table. See +** the virtual table xFilter method documentation for additional +** information. +** +** If idxNum==QUERY_GENERIC then do a full table scan against +** the %_content table. +** +** If idxNum==QUERY_ROWID then do a rowid lookup for a single entry +** in the %_content table. +** +** If idxNum>=QUERY_FULLTEXT then use the full text index. The +** column on the left-hand side of the MATCH operator is column +** number idxNum-QUERY_FULLTEXT, 0 indexed. argv[0] is the right-hand +** side of the MATCH operator. +*/ +/* TODO(shess) Upgrade the cursor initialization and destruction to +** account for fulltextFilter() being called multiple times on the +** same cursor. The current solution is very fragile. Apply fix to +** fts2 as appropriate. +*/ +static int fulltextFilter( + sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */ + int idxNum, const char *idxStr, /* Which indexing scheme to use */ + int argc, sqlite3_value **argv /* Arguments for the indexing scheme */ +){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + fulltext_vtab *v = cursor_vtab(c); + int rc; + char *zSql; + + TRACE(("FTS1 Filter %p\n",pCursor)); + + zSql = sqlite3_mprintf("select rowid, * from %%_content %s", + idxNum==QUERY_GENERIC ? 
"" : "where rowid=?"); + sqlite3_finalize(c->pStmt); + rc = sql_prepare(v->db, v->zDb, v->zName, &c->pStmt, zSql); + sqlite3_free(zSql); + if( rc!=SQLITE_OK ) return rc; + + c->iCursorType = idxNum; + switch( idxNum ){ + case QUERY_GENERIC: + break; + + case QUERY_ROWID: + rc = sqlite3_bind_int64(c->pStmt, 1, sqlite3_value_int64(argv[0])); + if( rc!=SQLITE_OK ) return rc; + break; + + default: /* full-text search */ + { + const char *zQuery = (const char *)sqlite3_value_text(argv[0]); + DocList *pResult; + assert( idxNum<=QUERY_FULLTEXT+v->nColumn); + assert( argc==1 ); + queryClear(&c->q); + rc = fulltextQuery(v, idxNum-QUERY_FULLTEXT, zQuery, -1, &pResult, &c->q); + if( rc!=SQLITE_OK ) return rc; + if( c->result.pDoclist!=NULL ) docListDelete(c->result.pDoclist); + readerInit(&c->result, pResult); + break; + } + } + + return fulltextNext(pCursor); +} + +/* This is the xEof method of the virtual table. The SQLite core +** calls this routine to find out if it has reached the end of +** a query's results set. +*/ +static int fulltextEof(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + return c->eof; +} + +/* This is the xColumn method of the virtual table. The SQLite +** core calls this method during a query when it needs the value +** of a column from the virtual table. This method needs to use +** one of the sqlite3_result_*() routines to store the requested +** value back in the pContext. +*/ +static int fulltextColumn(sqlite3_vtab_cursor *pCursor, + sqlite3_context *pContext, int idxCol){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + fulltext_vtab *v = cursor_vtab(c); + + if( idxColnColumn ){ + sqlite3_value *pVal = sqlite3_column_value(c->pStmt, idxCol+1); + sqlite3_result_value(pContext, pVal); + }else if( idxCol==v->nColumn ){ + /* The extra column whose name is the same as the table. + ** Return a blob which is a pointer to the cursor + */ + sqlite3_result_blob(pContext, &c, sizeof(c), SQLITE_TRANSIENT); + } + return SQLITE_OK; +} + +/* This is the xRowid method. The SQLite core calls this routine to +** retrive the rowid for the current row of the result set. The +** rowid should be written to *pRowid. +*/ +static int fulltextRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + + *pRowid = sqlite3_column_int64(c->pStmt, 0); + return SQLITE_OK; +} + +/* Add all terms in [zText] to the given hash table. If [iColumn] > 0, + * we also store positions and offsets in the hash table using the given + * column number. */ +static int buildTerms(fulltext_vtab *v, fts1Hash *terms, sqlite_int64 iDocid, + const char *zText, int iColumn){ + sqlite3_tokenizer *pTokenizer = v->pTokenizer; + sqlite3_tokenizer_cursor *pCursor; + const char *pToken; + int nTokenBytes; + int iStartOffset, iEndOffset, iPosition; + int rc; + + rc = pTokenizer->pModule->xOpen(pTokenizer, zText, -1, &pCursor); + if( rc!=SQLITE_OK ) return rc; + + pCursor->pTokenizer = pTokenizer; + while( SQLITE_OK==pTokenizer->pModule->xNext(pCursor, + &pToken, &nTokenBytes, + &iStartOffset, &iEndOffset, + &iPosition) ){ + DocList *p; + + /* Positions can't be negative; we use -1 as a terminator internally. 
*/ + if( iPosition<0 ){ + pTokenizer->pModule->xClose(pCursor); + return SQLITE_ERROR; + } + + p = fts1HashFind(terms, pToken, nTokenBytes); + if( p==NULL ){ + p = docListNew(DL_DEFAULT); + docListAddDocid(p, iDocid); + fts1HashInsert(terms, pToken, nTokenBytes, p); + } + if( iColumn>=0 ){ + docListAddPosOffset(p, iColumn, iPosition, iStartOffset, iEndOffset); + } + } + + /* TODO(shess) Check return? Should this be able to cause errors at + ** this point? Actually, same question about sqlite3_finalize(), + ** though one could argue that failure there means that the data is + ** not durable. *ponder* + */ + pTokenizer->pModule->xClose(pCursor); + return rc; +} + +/* Update the %_terms table to map the term [pTerm] to the given rowid. */ +static int index_insert_term(fulltext_vtab *v, const char *pTerm, int nTerm, + DocList *d){ + sqlite_int64 iIndexRow; + DocList doclist; + int iSegment = 0, rc; + + rc = term_select(v, pTerm, nTerm, iSegment, &iIndexRow, &doclist); + if( rc==SQLITE_DONE ){ + docListInit(&doclist, DL_DEFAULT, 0, 0); + docListUpdate(&doclist, d); + /* TODO(shess) Consider length(doclist)>CHUNK_MAX? */ + rc = term_insert(v, NULL, pTerm, nTerm, iSegment, &doclist); + goto err; + } + if( rc!=SQLITE_ROW ) return SQLITE_ERROR; + + docListUpdate(&doclist, d); + if( doclist.nData<=CHUNK_MAX ){ + rc = term_update(v, iIndexRow, &doclist); + goto err; + } + + /* Doclist doesn't fit, delete what's there, and accumulate + ** forward. + */ + rc = term_delete(v, iIndexRow); + if( rc!=SQLITE_OK ) goto err; + + /* Try to insert the doclist into a higher segment bucket. On + ** failure, accumulate existing doclist with the doclist from that + ** bucket, and put results in the next bucket. + */ + iSegment++; + while( (rc=term_insert(v, &iIndexRow, pTerm, nTerm, iSegment, + &doclist))!=SQLITE_OK ){ + sqlite_int64 iSegmentRow; + DocList old; + int rc2; + + /* Retain old error in case the term_insert() error was really an + ** error rather than a bounced insert. + */ + rc2 = term_select(v, pTerm, nTerm, iSegment, &iSegmentRow, &old); + if( rc2!=SQLITE_ROW ) goto err; + + rc = term_delete(v, iSegmentRow); + if( rc!=SQLITE_OK ) goto err; + + /* Reusing lowest-number deleted row keeps the index smaller. */ + if( iSegmentRownColumn ; ++i){ + char *zText = (char*)sqlite3_value_text(pValues[i]); + int rc = buildTerms(v, terms, iRowid, zText, i); + if( rc!=SQLITE_OK ) return rc; + } + return SQLITE_OK; +} + +/* Add empty doclists for all terms in the given row's content to the hash + * table [pTerms]. */ +static int deleteTerms(fulltext_vtab *v, fts1Hash *pTerms, sqlite_int64 iRowid){ + const char **pValues; + int i; + + int rc = content_select(v, iRowid, &pValues); + if( rc!=SQLITE_OK ) return rc; + + for(i = 0 ; i < v->nColumn; ++i) { + rc = buildTerms(v, pTerms, iRowid, pValues[i], -1); + if( rc!=SQLITE_OK ) break; + } + + freeStringArray(v->nColumn, pValues); + return SQLITE_OK; +} + +/* Insert a row into the %_content table; set *piRowid to be the ID of the + * new row. Fill [pTerms] with new doclists for the %_term table. 
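+ * The doclists in [pTerms] are not written here; fulltextUpdate()
+ * flushes them to the %_term table via index_insert_term().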
*/ +static int index_insert(fulltext_vtab *v, sqlite3_value *pRequestRowid, + sqlite3_value **pValues, + sqlite_int64 *piRowid, fts1Hash *pTerms){ + int rc; + + rc = content_insert(v, pRequestRowid, pValues); /* execute an SQL INSERT */ + if( rc!=SQLITE_OK ) return rc; + *piRowid = sqlite3_last_insert_rowid(v->db); + return insertTerms(v, pTerms, *piRowid, pValues); +} + +/* Delete a row from the %_content table; fill [pTerms] with empty doclists + * to be written to the %_term table. */ +static int index_delete(fulltext_vtab *v, sqlite_int64 iRow, fts1Hash *pTerms){ + int rc = deleteTerms(v, pTerms, iRow); + if( rc!=SQLITE_OK ) return rc; + return content_delete(v, iRow); /* execute an SQL DELETE */ +} + +/* Update a row in the %_content table; fill [pTerms] with new doclists for the + * %_term table. */ +static int index_update(fulltext_vtab *v, sqlite_int64 iRow, + sqlite3_value **pValues, fts1Hash *pTerms){ + /* Generate an empty doclist for each term that previously appeared in this + * row. */ + int rc = deleteTerms(v, pTerms, iRow); + if( rc!=SQLITE_OK ) return rc; + + rc = content_update(v, pValues, iRow); /* execute an SQL UPDATE */ + if( rc!=SQLITE_OK ) return rc; + + /* Now add positions for terms which appear in the updated row. */ + return insertTerms(v, pTerms, iRow, pValues); +} + +/* This function implements the xUpdate callback; it's the top-level entry + * point for inserting, deleting or updating a row in a full-text table. */ +static int fulltextUpdate(sqlite3_vtab *pVtab, int nArg, sqlite3_value **ppArg, + sqlite_int64 *pRowid){ + fulltext_vtab *v = (fulltext_vtab *) pVtab; + fts1Hash terms; /* maps term string -> PosList */ + int rc; + fts1HashElem *e; + + TRACE(("FTS1 Update %p\n", pVtab)); + + fts1HashInit(&terms, FTS1_HASH_STRING, 1); + + if( nArg<2 ){ + rc = index_delete(v, sqlite3_value_int64(ppArg[0]), &terms); + } else if( sqlite3_value_type(ppArg[0]) != SQLITE_NULL ){ + /* An update: + * ppArg[0] = old rowid + * ppArg[1] = new rowid + * ppArg[2..2+v->nColumn-1] = values + * ppArg[2+v->nColumn] = value for magic column (we ignore this) + */ + sqlite_int64 rowid = sqlite3_value_int64(ppArg[0]); + if( sqlite3_value_type(ppArg[1]) != SQLITE_INTEGER || + sqlite3_value_int64(ppArg[1]) != rowid ){ + rc = SQLITE_ERROR; /* we don't allow changing the rowid */ + } else { + assert( nArg==2+v->nColumn+1); + rc = index_update(v, rowid, &ppArg[2], &terms); + } + } else { + /* An insert: + * ppArg[1] = requested rowid + * ppArg[2..2+v->nColumn-1] = values + * ppArg[2+v->nColumn] = value for magic column (we ignore this) + */ + assert( nArg==2+v->nColumn+1); + rc = index_insert(v, ppArg[1], &ppArg[2], pRowid, &terms); + } + + if( rc==SQLITE_OK ){ + /* Write updated doclists to disk. 
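+    ** Each hash entry maps a term to the doclist accumulated above;
+    ** index_insert_term() folds it into segment 0 of %_term,
+    ** promoting to higher segments when a chunk grows past CHUNK_MAX.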
*/ + for(e=fts1HashFirst(&terms); e; e=fts1HashNext(e)){ + DocList *p = fts1HashData(e); + rc = index_insert_term(v, fts1HashKey(e), fts1HashKeysize(e), p); + if( rc!=SQLITE_OK ) break; + } + } + + /* clean up */ + for(e=fts1HashFirst(&terms); e; e=fts1HashNext(e)){ + DocList *p = fts1HashData(e); + docListDelete(p); + } + fts1HashClear(&terms); + + return rc; +} + +/* +** Implementation of the snippet() function for FTS1 +*/ +static void snippetFunc( + sqlite3_context *pContext, + int argc, + sqlite3_value **argv +){ + fulltext_cursor *pCursor; + if( argc<1 ) return; + if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || + sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ + sqlite3_result_error(pContext, "illegal first argument to html_snippet",-1); + }else{ + const char *zStart = ""; + const char *zEnd = ""; + const char *zEllipsis = "..."; + memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); + if( argc>=2 ){ + zStart = (const char*)sqlite3_value_text(argv[1]); + if( argc>=3 ){ + zEnd = (const char*)sqlite3_value_text(argv[2]); + if( argc>=4 ){ + zEllipsis = (const char*)sqlite3_value_text(argv[3]); + } + } + } + snippetAllOffsets(pCursor); + snippetText(pCursor, zStart, zEnd, zEllipsis); + sqlite3_result_text(pContext, pCursor->snippet.zSnippet, + pCursor->snippet.nSnippet, SQLITE_STATIC); + } +} + +/* +** Implementation of the offsets() function for FTS1 +*/ +static void snippetOffsetsFunc( + sqlite3_context *pContext, + int argc, + sqlite3_value **argv +){ + fulltext_cursor *pCursor; + if( argc<1 ) return; + if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || + sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ + sqlite3_result_error(pContext, "illegal first argument to offsets",-1); + }else{ + memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); + snippetAllOffsets(pCursor); + snippetOffsetText(&pCursor->snippet); + sqlite3_result_text(pContext, + pCursor->snippet.zOffset, pCursor->snippet.nOffset, + SQLITE_STATIC); + } +} + +/* +** This routine implements the xFindFunction method for the FTS1 +** virtual table. +*/ +static int fulltextFindFunction( + sqlite3_vtab *pVtab, + int nArg, + const char *zName, + void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), + void **ppArg +){ + if( strcmp(zName,"snippet")==0 ){ + *pxFunc = snippetFunc; + return 1; + }else if( strcmp(zName,"offsets")==0 ){ + *pxFunc = snippetOffsetsFunc; + return 1; + } + return 0; +} + +/* +** Rename an fts1 table. 
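Looking back at snippetFunc() and snippetOffsetsFunc() above: they are reached only when SQLite routes the overloaded snippet()/offsets() calls through xFindFunction, with the FTS table's own magic column as the first argument. A small usage sketch from C, assuming a hypothetical fts1 table named t; the helper name and query text are illustrative only.

#include <stdio.h>
#include "sqlite3.h"

static int print_row(void *pUnused, int nCol, char **azVal, char **azCol){
  int i;
  (void)pUnused;
  for(i=0; i<nCol; i++){
    printf("%s = %s\n", azCol[i], azVal[i] ? azVal[i] : "NULL");
  }
  return 0;
}

int query_snippets(sqlite3 *db){
  /* snippet(t, <start>, <end>, <ellipsis>) and offsets(t) are evaluated
  ** by the functions above while t is being filtered with MATCH. */
  return sqlite3_exec(db,
      "SELECT snippet(t, '<b>', '</b>', '...'), offsets(t) "
      "  FROM t WHERE t MATCH 'sqlite'",
      print_row, 0, 0);
}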
+*/ +static int fulltextRename( + sqlite3_vtab *pVtab, + const char *zName +){ + fulltext_vtab *p = (fulltext_vtab *)pVtab; + int rc = SQLITE_NOMEM; + char *zSql = sqlite3_mprintf( + "ALTER TABLE %Q.'%q_content' RENAME TO '%q_content';" + "ALTER TABLE %Q.'%q_term' RENAME TO '%q_term';" + , p->zDb, p->zName, zName + , p->zDb, p->zName, zName + ); + if( zSql ){ + rc = sqlite3_exec(p->db, zSql, 0, 0, 0); + sqlite3_free(zSql); + } + return rc; +} + +static const sqlite3_module fulltextModule = { + /* iVersion */ 0, + /* xCreate */ fulltextCreate, + /* xConnect */ fulltextConnect, + /* xBestIndex */ fulltextBestIndex, + /* xDisconnect */ fulltextDisconnect, + /* xDestroy */ fulltextDestroy, + /* xOpen */ fulltextOpen, + /* xClose */ fulltextClose, + /* xFilter */ fulltextFilter, + /* xNext */ fulltextNext, + /* xEof */ fulltextEof, + /* xColumn */ fulltextColumn, + /* xRowid */ fulltextRowid, + /* xUpdate */ fulltextUpdate, + /* xBegin */ 0, + /* xSync */ 0, + /* xCommit */ 0, + /* xRollback */ 0, + /* xFindFunction */ fulltextFindFunction, + /* xRename */ fulltextRename, +}; + +int sqlite3Fts1Init(sqlite3 *db){ + sqlite3_overload_function(db, "snippet", -1); + sqlite3_overload_function(db, "offsets", -1); + return sqlite3_create_module(db, "fts1", &fulltextModule, 0); +} + +#if !SQLITE_CORE +int sqlite3_extension_init(sqlite3 *db, char **pzErrMsg, + const sqlite3_api_routines *pApi){ + SQLITE_EXTENSION_INIT2(pApi) + return sqlite3Fts1Init(db); +} +#endif + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1) */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/fts1.h b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/fts1.h new file mode 100644 index 0000000000..d55e689733 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/fts1.h @@ -0,0 +1,11 @@ +#include "sqlite3.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +int sqlite3Fts1Init(sqlite3 *db); + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/fts1_hash.c b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/fts1_hash.c new file mode 100644 index 0000000000..463a52b645 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/fts1_hash.c @@ -0,0 +1,369 @@ +/* +** 2001 September 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This is the implementation of generic hash-tables used in SQLite. +** We've modified it slightly to serve as a standalone hash table +** implementation for the full-text indexing module. +*/ +#include +#include +#include + +/* +** The code in this file is only compiled if: +** +** * The FTS1 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS1 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS1 is defined). +*/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1) + + +#include "fts1_hash.h" + +static void *malloc_and_zero(int n){ + void *p = malloc(n); + if( p ){ + memset(p, 0, n); + } + return p; +} + +/* Turn bulk memory into a hash table object by initializing the +** fields of the Hash structure. +** +** "pNew" is a pointer to the hash table that is to be initialized. 
+** keyClass is one of the constants +** FTS1_HASH_BINARY or FTS1_HASH_STRING. The value of keyClass +** determines what kind of key the hash table will use. "copyKey" is +** true if the hash table should make its own private copy of keys and +** false if it should just use the supplied pointer. +*/ +void sqlite3Fts1HashInit(fts1Hash *pNew, int keyClass, int copyKey){ + assert( pNew!=0 ); + assert( keyClass>=FTS1_HASH_STRING && keyClass<=FTS1_HASH_BINARY ); + pNew->keyClass = keyClass; + pNew->copyKey = copyKey; + pNew->first = 0; + pNew->count = 0; + pNew->htsize = 0; + pNew->ht = 0; + pNew->xMalloc = malloc_and_zero; + pNew->xFree = free; +} + +/* Remove all entries from a hash table. Reclaim all memory. +** Call this routine to delete a hash table or to reset a hash table +** to the empty state. +*/ +void sqlite3Fts1HashClear(fts1Hash *pH){ + fts1HashElem *elem; /* For looping over all elements of the table */ + + assert( pH!=0 ); + elem = pH->first; + pH->first = 0; + if( pH->ht ) pH->xFree(pH->ht); + pH->ht = 0; + pH->htsize = 0; + while( elem ){ + fts1HashElem *next_elem = elem->next; + if( pH->copyKey && elem->pKey ){ + pH->xFree(elem->pKey); + } + pH->xFree(elem); + elem = next_elem; + } + pH->count = 0; +} + +/* +** Hash and comparison functions when the mode is FTS1_HASH_STRING +*/ +static int strHash(const void *pKey, int nKey){ + const char *z = (const char *)pKey; + int h = 0; + if( nKey<=0 ) nKey = (int) strlen(z); + while( nKey > 0 ){ + h = (h<<3) ^ h ^ *z++; + nKey--; + } + return h & 0x7fffffff; +} +static int strCompare(const void *pKey1, int n1, const void *pKey2, int n2){ + if( n1!=n2 ) return 1; + return strncmp((const char*)pKey1,(const char*)pKey2,n1); +} + +/* +** Hash and comparison functions when the mode is FTS1_HASH_BINARY +*/ +static int binHash(const void *pKey, int nKey){ + int h = 0; + const char *z = (const char *)pKey; + while( nKey-- > 0 ){ + h = (h<<3) ^ h ^ *(z++); + } + return h & 0x7fffffff; +} +static int binCompare(const void *pKey1, int n1, const void *pKey2, int n2){ + if( n1!=n2 ) return 1; + return memcmp(pKey1,pKey2,n1); +} + +/* +** Return a pointer to the appropriate hash function given the key class. +** +** The C syntax in this function definition may be unfamilar to some +** programmers, so we provide the following additional explanation: +** +** The name of the function is "hashFunction". The function takes a +** single parameter "keyClass". The return value of hashFunction() +** is a pointer to another function. Specifically, the return value +** of hashFunction() is a pointer to a function that takes two parameters +** with types "const void*" and "int" and returns an "int". +*/ +static int (*hashFunction(int keyClass))(const void*,int){ + if( keyClass==FTS1_HASH_STRING ){ + return &strHash; + }else{ + assert( keyClass==FTS1_HASH_BINARY ); + return &binHash; + } +} + +/* +** Return a pointer to the appropriate hash function given the key class. +** +** For help in interpreted the obscure C code in the function definition, +** see the header comment on the previous function. 
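The comment above flags the function-pointer-returning-function syntax as potentially unfamiliar. Here is a stand-alone restatement using a typedef, which is the more common way to spell the same thing; dummyHash and pickHash are made-up names for illustration only.

#include <assert.h>

typedef int (*hash_fn)(const void*, int);

static int dummyHash(const void *pKey, int nKey){
  (void)pKey;
  return nKey & 0x7fffffff;
}

/* Same shape as: static int (*hashFunction(int keyClass))(const void*,int) */
static hash_fn pickHash(int keyClass){
  (void)keyClass;
  return &dummyHash;
}

void fnptr_demo(void){
  hash_fn h = pickHash(1);        /* pretend key class */
  assert( h("abc", 3)==3 );
}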
+*/ +static int (*compareFunction(int keyClass))(const void*,int,const void*,int){ + if( keyClass==FTS1_HASH_STRING ){ + return &strCompare; + }else{ + assert( keyClass==FTS1_HASH_BINARY ); + return &binCompare; + } +} + +/* Link an element into the hash table +*/ +static void insertElement( + fts1Hash *pH, /* The complete hash table */ + struct _fts1ht *pEntry, /* The entry into which pNew is inserted */ + fts1HashElem *pNew /* The element to be inserted */ +){ + fts1HashElem *pHead; /* First element already in pEntry */ + pHead = pEntry->chain; + if( pHead ){ + pNew->next = pHead; + pNew->prev = pHead->prev; + if( pHead->prev ){ pHead->prev->next = pNew; } + else { pH->first = pNew; } + pHead->prev = pNew; + }else{ + pNew->next = pH->first; + if( pH->first ){ pH->first->prev = pNew; } + pNew->prev = 0; + pH->first = pNew; + } + pEntry->count++; + pEntry->chain = pNew; +} + + +/* Resize the hash table so that it cantains "new_size" buckets. +** "new_size" must be a power of 2. The hash table might fail +** to resize if sqliteMalloc() fails. +*/ +static void rehash(fts1Hash *pH, int new_size){ + struct _fts1ht *new_ht; /* The new hash table */ + fts1HashElem *elem, *next_elem; /* For looping over existing elements */ + int (*xHash)(const void*,int); /* The hash function */ + + assert( (new_size & (new_size-1))==0 ); + new_ht = (struct _fts1ht *)pH->xMalloc( new_size*sizeof(struct _fts1ht) ); + if( new_ht==0 ) return; + if( pH->ht ) pH->xFree(pH->ht); + pH->ht = new_ht; + pH->htsize = new_size; + xHash = hashFunction(pH->keyClass); + for(elem=pH->first, pH->first=0; elem; elem = next_elem){ + int h = (*xHash)(elem->pKey, elem->nKey) & (new_size-1); + next_elem = elem->next; + insertElement(pH, &new_ht[h], elem); + } +} + +/* This function (for internal use only) locates an element in an +** hash table that matches the given key. The hash for this key has +** already been computed and is passed as the 4th parameter. +*/ +static fts1HashElem *findElementGivenHash( + const fts1Hash *pH, /* The pH to be searched */ + const void *pKey, /* The key we are searching for */ + int nKey, + int h /* The hash for this key. */ +){ + fts1HashElem *elem; /* Used to loop thru the element list */ + int count; /* Number of elements left to test */ + int (*xCompare)(const void*,int,const void*,int); /* comparison function */ + + if( pH->ht ){ + struct _fts1ht *pEntry = &pH->ht[h]; + elem = pEntry->chain; + count = pEntry->count; + xCompare = compareFunction(pH->keyClass); + while( count-- && elem ){ + if( (*xCompare)(elem->pKey,elem->nKey,pKey,nKey)==0 ){ + return elem; + } + elem = elem->next; + } + } + return 0; +} + +/* Remove a single entry from the hash table given a pointer to that +** element and a hash on the element's key. 
+*/ +static void removeElementGivenHash( + fts1Hash *pH, /* The pH containing "elem" */ + fts1HashElem* elem, /* The element to be removed from the pH */ + int h /* Hash value for the element */ +){ + struct _fts1ht *pEntry; + if( elem->prev ){ + elem->prev->next = elem->next; + }else{ + pH->first = elem->next; + } + if( elem->next ){ + elem->next->prev = elem->prev; + } + pEntry = &pH->ht[h]; + if( pEntry->chain==elem ){ + pEntry->chain = elem->next; + } + pEntry->count--; + if( pEntry->count<=0 ){ + pEntry->chain = 0; + } + if( pH->copyKey && elem->pKey ){ + pH->xFree(elem->pKey); + } + pH->xFree( elem ); + pH->count--; + if( pH->count<=0 ){ + assert( pH->first==0 ); + assert( pH->count==0 ); + fts1HashClear(pH); + } +} + +/* Attempt to locate an element of the hash table pH with a key +** that matches pKey,nKey. Return the data for this element if it is +** found, or NULL if there is no match. +*/ +void *sqlite3Fts1HashFind(const fts1Hash *pH, const void *pKey, int nKey){ + int h; /* A hash on key */ + fts1HashElem *elem; /* The element that matches key */ + int (*xHash)(const void*,int); /* The hash function */ + + if( pH==0 || pH->ht==0 ) return 0; + xHash = hashFunction(pH->keyClass); + assert( xHash!=0 ); + h = (*xHash)(pKey,nKey); + assert( (pH->htsize & (pH->htsize-1))==0 ); + elem = findElementGivenHash(pH,pKey,nKey, h & (pH->htsize-1)); + return elem ? elem->data : 0; +} + +/* Insert an element into the hash table pH. The key is pKey,nKey +** and the data is "data". +** +** If no element exists with a matching key, then a new +** element is created. A copy of the key is made if the copyKey +** flag is set. NULL is returned. +** +** If another element already exists with the same key, then the +** new data replaces the old data and the old data is returned. +** The key is not copied in this instance. If a malloc fails, then +** the new data is returned and the hash table is unchanged. +** +** If the "data" parameter to this function is NULL, then the +** element corresponding to "key" is removed from the hash table. 
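A usage sketch that exercises exactly the insert/replace/delete semantics documented above. It assumes fts1_hash.h is on the include path; the stored values are arbitrary and the function name is made up.

#include <assert.h>
#include "fts1_hash.h"

void hash_demo(void){
  fts1Hash h;
  static int one = 1, two = 2;
  void *pOld;

  sqlite3Fts1HashInit(&h, FTS1_HASH_STRING, 1);      /* copyKey=1: keys are copied */

  pOld = sqlite3Fts1HashInsert(&h, "alpha", 5, &one);
  assert( pOld==0 );                                 /* new key: NULL returned */

  pOld = sqlite3Fts1HashInsert(&h, "alpha", 5, &two);
  assert( pOld==&one );                              /* replaced: old data returned */
  assert( sqlite3Fts1HashFind(&h, "alpha", 5)==&two );

  pOld = sqlite3Fts1HashInsert(&h, "alpha", 5, 0);   /* NULL data removes the entry */
  assert( pOld==&two );
  assert( sqlite3Fts1HashFind(&h, "alpha", 5)==0 );

  sqlite3Fts1HashClear(&h);
}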
+*/ +void *sqlite3Fts1HashInsert( + fts1Hash *pH, /* The hash table to insert into */ + const void *pKey, /* The key */ + int nKey, /* Number of bytes in the key */ + void *data /* The data */ +){ + int hraw; /* Raw hash value of the key */ + int h; /* the hash of the key modulo hash table size */ + fts1HashElem *elem; /* Used to loop thru the element list */ + fts1HashElem *new_elem; /* New element added to the pH */ + int (*xHash)(const void*,int); /* The hash function */ + + assert( pH!=0 ); + xHash = hashFunction(pH->keyClass); + assert( xHash!=0 ); + hraw = (*xHash)(pKey, nKey); + assert( (pH->htsize & (pH->htsize-1))==0 ); + h = hraw & (pH->htsize-1); + elem = findElementGivenHash(pH,pKey,nKey,h); + if( elem ){ + void *old_data = elem->data; + if( data==0 ){ + removeElementGivenHash(pH,elem,h); + }else{ + elem->data = data; + } + return old_data; + } + if( data==0 ) return 0; + new_elem = (fts1HashElem*)pH->xMalloc( sizeof(fts1HashElem) ); + if( new_elem==0 ) return data; + if( pH->copyKey && pKey!=0 ){ + new_elem->pKey = pH->xMalloc( nKey ); + if( new_elem->pKey==0 ){ + pH->xFree(new_elem); + return data; + } + memcpy((void*)new_elem->pKey, pKey, nKey); + }else{ + new_elem->pKey = (void*)pKey; + } + new_elem->nKey = nKey; + pH->count++; + if( pH->htsize==0 ){ + rehash(pH,8); + if( pH->htsize==0 ){ + pH->count = 0; + pH->xFree(new_elem); + return data; + } + } + if( pH->count > pH->htsize ){ + rehash(pH,pH->htsize*2); + } + assert( pH->htsize>0 ); + assert( (pH->htsize & (pH->htsize-1))==0 ); + h = hraw & (pH->htsize-1); + insertElement(pH, &pH->ht[h], new_elem); + new_elem->data = data; + return 0; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1) */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/fts1_hash.h b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/fts1_hash.h new file mode 100644 index 0000000000..c31c430f7c --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/fts1_hash.h @@ -0,0 +1,112 @@ +/* +** 2001 September 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This is the header file for the generic hash-table implemenation +** used in SQLite. We've modified it slightly to serve as a standalone +** hash table implementation for the full-text indexing module. +** +*/ +#ifndef _FTS1_HASH_H_ +#define _FTS1_HASH_H_ + +/* Forward declarations of structures. */ +typedef struct fts1Hash fts1Hash; +typedef struct fts1HashElem fts1HashElem; + +/* A complete hash table is an instance of the following structure. +** The internals of this structure are intended to be opaque -- client +** code should not attempt to access or modify the fields of this structure +** directly. Change this structure only by using the routines below. +** However, many of the "procedures" and "functions" for modifying and +** accessing this structure are really macros, so we can't really make +** this structure opaque. 
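One detail worth noting in the insert path above: the bucket index is computed as hraw & (htsize-1), which is equivalent to hraw % htsize only because htsize is kept at a power of two (rehash(pH,8), then doubling). A tiny self-check of that identity; the function names are illustrative.

#include <assert.h>

static int bucket_of(int hraw, int htsize){
  assert( htsize>0 && (htsize & (htsize-1))==0 );    /* power of two */
  return hraw & (htsize-1);
}

void mask_demo(void){
  int h;
  for(h=0; h<1000; h++){
    assert( bucket_of(h, 8)==h%8 );
    assert( bucket_of(h, 256)==h%256 );
  }
}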
+*/ +struct fts1Hash { + char keyClass; /* HASH_INT, _POINTER, _STRING, _BINARY */ + char copyKey; /* True if copy of key made on insert */ + int count; /* Number of entries in this table */ + fts1HashElem *first; /* The first element of the array */ + void *(*xMalloc)(int); /* malloc() function to use */ + void (*xFree)(void *); /* free() function to use */ + int htsize; /* Number of buckets in the hash table */ + struct _fts1ht { /* the hash table */ + int count; /* Number of entries with this hash */ + fts1HashElem *chain; /* Pointer to first entry with this hash */ + } *ht; +}; + +/* Each element in the hash table is an instance of the following +** structure. All elements are stored on a single doubly-linked list. +** +** Again, this structure is intended to be opaque, but it can't really +** be opaque because it is used by macros. +*/ +struct fts1HashElem { + fts1HashElem *next, *prev; /* Next and previous elements in the table */ + void *data; /* Data associated with this element */ + void *pKey; int nKey; /* Key associated with this element */ +}; + +/* +** There are 2 different modes of operation for a hash table: +** +** FTS1_HASH_STRING pKey points to a string that is nKey bytes long +** (including the null-terminator, if any). Case +** is respected in comparisons. +** +** FTS1_HASH_BINARY pKey points to binary data nKey bytes long. +** memcmp() is used to compare keys. +** +** A copy of the key is made if the copyKey parameter to fts1HashInit is 1. +*/ +#define FTS1_HASH_STRING 1 +#define FTS1_HASH_BINARY 2 + +/* +** Access routines. To delete, insert a NULL pointer. +*/ +void sqlite3Fts1HashInit(fts1Hash*, int keytype, int copyKey); +void *sqlite3Fts1HashInsert(fts1Hash*, const void *pKey, int nKey, void *pData); +void *sqlite3Fts1HashFind(const fts1Hash*, const void *pKey, int nKey); +void sqlite3Fts1HashClear(fts1Hash*); + +/* +** Shorthand for the functions above +*/ +#define fts1HashInit sqlite3Fts1HashInit +#define fts1HashInsert sqlite3Fts1HashInsert +#define fts1HashFind sqlite3Fts1HashFind +#define fts1HashClear sqlite3Fts1HashClear + +/* +** Macros for looping over all elements of a hash table. The idiom is +** like this: +** +** fts1Hash h; +** fts1HashElem *p; +** ... +** for(p=fts1HashFirst(&h); p; p=fts1HashNext(p)){ +** SomeStructure *pData = fts1HashData(p); +** // do something with pData +** } +*/ +#define fts1HashFirst(H) ((H)->first) +#define fts1HashNext(E) ((E)->next) +#define fts1HashData(E) ((E)->data) +#define fts1HashKey(E) ((E)->pKey) +#define fts1HashKeysize(E) ((E)->nKey) + +/* +** Number of entries in a hash table +*/ +#define fts1HashCount(H) ((H)->count) + +#endif /* _FTS1_HASH_H_ */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/fts1_porter.c b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/fts1_porter.c new file mode 100644 index 0000000000..1d26236681 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/fts1_porter.c @@ -0,0 +1,643 @@ +/* +** 2006 September 30 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** Implementation of the full-text-search tokenizer that implements +** a Porter stemmer. 
+*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS1 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS1 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS1 is defined). +*/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1) + + +#include +#include +#include +#include +#include + +#include "fts1_tokenizer.h" + +/* +** Class derived from sqlite3_tokenizer +*/ +typedef struct porter_tokenizer { + sqlite3_tokenizer base; /* Base class */ +} porter_tokenizer; + +/* +** Class derived from sqlit3_tokenizer_cursor +*/ +typedef struct porter_tokenizer_cursor { + sqlite3_tokenizer_cursor base; + const char *zInput; /* input we are tokenizing */ + int nInput; /* size of the input */ + int iOffset; /* current position in zInput */ + int iToken; /* index of next token to be returned */ + char *zToken; /* storage for current token */ + int nAllocated; /* space allocated to zToken buffer */ +} porter_tokenizer_cursor; + + +/* Forward declaration */ +static const sqlite3_tokenizer_module porterTokenizerModule; + + +/* +** Create a new tokenizer instance. +*/ +static int porterCreate( + int argc, const char * const *argv, + sqlite3_tokenizer **ppTokenizer +){ + porter_tokenizer *t; + t = (porter_tokenizer *) calloc(sizeof(*t), 1); + if( t==NULL ) return SQLITE_NOMEM; + + *ppTokenizer = &t->base; + return SQLITE_OK; +} + +/* +** Destroy a tokenizer +*/ +static int porterDestroy(sqlite3_tokenizer *pTokenizer){ + free(pTokenizer); + return SQLITE_OK; +} + +/* +** Prepare to begin tokenizing a particular string. The input +** string to be tokenized is zInput[0..nInput-1]. A cursor +** used to incrementally tokenize this string is returned in +** *ppCursor. +*/ +static int porterOpen( + sqlite3_tokenizer *pTokenizer, /* The tokenizer */ + const char *zInput, int nInput, /* String to be tokenized */ + sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ +){ + porter_tokenizer_cursor *c; + + c = (porter_tokenizer_cursor *) malloc(sizeof(*c)); + if( c==NULL ) return SQLITE_NOMEM; + + c->zInput = zInput; + if( zInput==0 ){ + c->nInput = 0; + }else if( nInput<0 ){ + c->nInput = (int)strlen(zInput); + }else{ + c->nInput = nInput; + } + c->iOffset = 0; /* start tokenizing at the beginning */ + c->iToken = 0; + c->zToken = NULL; /* no space allocated, yet. */ + c->nAllocated = 0; + + *ppCursor = &c->base; + return SQLITE_OK; +} + +/* +** Close a tokenization cursor previously opened by a call to +** porterOpen() above. +*/ +static int porterClose(sqlite3_tokenizer_cursor *pCursor){ + porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor; + free(c->zToken); + free(c); + return SQLITE_OK; +} +/* +** Vowel or consonant +*/ +static const char cType[] = { + 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, + 1, 1, 1, 2, 1 +}; + +/* +** isConsonant() and isVowel() determine if their first character in +** the string they point to is a consonant or a vowel, according +** to Porter ruls. +** +** A consonate is any letter other than 'a', 'e', 'i', 'o', or 'u'. +** 'Y' is a consonant unless it follows another consonant, +** in which case it is a vowel. +** +** In these routine, the letters are in reverse order. So the 'y' rule +** is that 'y' is a consonant unless it is followed by another +** consonent. 
+*/ +static int isVowel(const char*); +static int isConsonant(const char *z){ + int j; + char x = *z; + if( x==0 ) return 0; + assert( x>='a' && x<='z' ); + j = cType[x-'a']; + if( j<2 ) return j; + return z[1]==0 || isVowel(z + 1); +} +static int isVowel(const char *z){ + int j; + char x = *z; + if( x==0 ) return 0; + assert( x>='a' && x<='z' ); + j = cType[x-'a']; + if( j<2 ) return 1-j; + return isConsonant(z + 1); +} + +/* +** Let any sequence of one or more vowels be represented by V and let +** C be sequence of one or more consonants. Then every word can be +** represented as: +** +** [C] (VC){m} [V] +** +** In prose: A word is an optional consonant followed by zero or +** vowel-consonant pairs followed by an optional vowel. "m" is the +** number of vowel consonant pairs. This routine computes the value +** of m for the first i bytes of a word. +** +** Return true if the m-value for z is 1 or more. In other words, +** return true if z contains at least one vowel that is followed +** by a consonant. +** +** In this routine z[] is in reverse order. So we are really looking +** for an instance of of a consonant followed by a vowel. +*/ +static int m_gt_0(const char *z){ + while( isVowel(z) ){ z++; } + if( *z==0 ) return 0; + while( isConsonant(z) ){ z++; } + return *z!=0; +} + +/* Like mgt0 above except we are looking for a value of m which is +** exactly 1 +*/ +static int m_eq_1(const char *z){ + while( isVowel(z) ){ z++; } + if( *z==0 ) return 0; + while( isConsonant(z) ){ z++; } + if( *z==0 ) return 0; + while( isVowel(z) ){ z++; } + if( *z==0 ) return 1; + while( isConsonant(z) ){ z++; } + return *z==0; +} + +/* Like mgt0 above except we are looking for a value of m>1 instead +** or m>0 +*/ +static int m_gt_1(const char *z){ + while( isVowel(z) ){ z++; } + if( *z==0 ) return 0; + while( isConsonant(z) ){ z++; } + if( *z==0 ) return 0; + while( isVowel(z) ){ z++; } + if( *z==0 ) return 0; + while( isConsonant(z) ){ z++; } + return *z!=0; +} + +/* +** Return TRUE if there is a vowel anywhere within z[0..n-1] +*/ +static int hasVowel(const char *z){ + while( isConsonant(z) ){ z++; } + return *z!=0; +} + +/* +** Return TRUE if the word ends in a double consonant. +** +** The text is reversed here. So we are really looking at +** the first two characters of z[]. +*/ +static int doubleConsonant(const char *z){ + return isConsonant(z) && z[0]==z[1] && isConsonant(z+1); +} + +/* +** Return TRUE if the word ends with three letters which +** are consonant-vowel-consonent and where the final consonant +** is not 'w', 'x', or 'y'. +** +** The word is reversed here. So we are really checking the +** first three letters and the first one cannot be in [wxy]. +*/ +static int star_oh(const char *z){ + return + z[0]!=0 && isConsonant(z) && + z[0]!='w' && z[0]!='x' && z[0]!='y' && + z[1]!=0 && isVowel(z+1) && + z[2]!=0 && isConsonant(z+2); +} + +/* +** If the word ends with zFrom and xCond() is true for the stem +** of the word that preceeds the zFrom ending, then change the +** ending to zTo. +** +** The input word *pz and zFrom are both in reverse order. zTo +** is in normal order. +** +** Return TRUE if zFrom matches. Return FALSE if zFrom does not +** match. Not that TRUE is returned even if xCond() fails and +** no substitution occurs. +*/ +static int stem( + char **pz, /* The word being stemmed (Reversed) */ + const char *zFrom, /* If the ending matches this... (Reversed) */ + const char *zTo, /* ... 
change the ending to this (not reversed) */ + int (*xCond)(const char*) /* Condition that must be true */ +){ + char *z = *pz; + while( *zFrom && *zFrom==*z ){ z++; zFrom++; } + if( *zFrom!=0 ) return 0; + if( xCond && !xCond(z) ) return 1; + while( *zTo ){ + *(--z) = *(zTo++); + } + *pz = z; + return 1; +} + +/* +** This is the fallback stemmer used when the porter stemmer is +** inappropriate. The input word is copied into the output with +** US-ASCII case folding. If the input word is too long (more +** than 20 bytes if it contains no digits or more than 6 bytes if +** it contains digits) then word is truncated to 20 or 6 bytes +** by taking 10 or 3 bytes from the beginning and end. +*/ +static void copy_stemmer(const char *zIn, int nIn, char *zOut, int *pnOut){ + int i, mx, j; + int hasDigit = 0; + for(i=0; i='A' && c<='Z' ){ + zOut[i] = c - 'A' + 'a'; + }else{ + if( c>='0' && c<='9' ) hasDigit = 1; + zOut[i] = c; + } + } + mx = hasDigit ? 3 : 10; + if( nIn>mx*2 ){ + for(j=mx, i=nIn-mx; i=sizeof(zReverse)-7 ){ + /* The word is too big or too small for the porter stemmer. + ** Fallback to the copy stemmer */ + copy_stemmer(zIn, nIn, zOut, pnOut); + return; + } + for(i=0, j=sizeof(zReverse)-6; i='A' && c<='Z' ){ + zReverse[j] = c + 'a' - 'A'; + }else if( c>='a' && c<='z' ){ + zReverse[j] = c; + }else{ + /* The use of a character not in [a-zA-Z] means that we fallback + ** to the copy stemmer */ + copy_stemmer(zIn, nIn, zOut, pnOut); + return; + } + } + memset(&zReverse[sizeof(zReverse)-5], 0, 5); + z = &zReverse[j+1]; + + + /* Step 1a */ + if( z[0]=='s' ){ + if( + !stem(&z, "sess", "ss", 0) && + !stem(&z, "sei", "i", 0) && + !stem(&z, "ss", "ss", 0) + ){ + z++; + } + } + + /* Step 1b */ + z2 = z; + if( stem(&z, "dee", "ee", m_gt_0) ){ + /* Do nothing. The work was all in the test */ + }else if( + (stem(&z, "gni", "", hasVowel) || stem(&z, "de", "", hasVowel)) + && z!=z2 + ){ + if( stem(&z, "ta", "ate", 0) || + stem(&z, "lb", "ble", 0) || + stem(&z, "zi", "ize", 0) ){ + /* Do nothing. 
The work was all in the test */ + }else if( doubleConsonant(z) && (*z!='l' && *z!='s' && *z!='z') ){ + z++; + }else if( m_eq_1(z) && star_oh(z) ){ + *(--z) = 'e'; + } + } + + /* Step 1c */ + if( z[0]=='y' && hasVowel(z+1) ){ + z[0] = 'i'; + } + + /* Step 2 */ + switch( z[1] ){ + case 'a': + stem(&z, "lanoita", "ate", m_gt_0) || + stem(&z, "lanoit", "tion", m_gt_0); + break; + case 'c': + stem(&z, "icne", "ence", m_gt_0) || + stem(&z, "icna", "ance", m_gt_0); + break; + case 'e': + stem(&z, "rezi", "ize", m_gt_0); + break; + case 'g': + stem(&z, "igol", "log", m_gt_0); + break; + case 'l': + stem(&z, "ilb", "ble", m_gt_0) || + stem(&z, "illa", "al", m_gt_0) || + stem(&z, "iltne", "ent", m_gt_0) || + stem(&z, "ile", "e", m_gt_0) || + stem(&z, "ilsuo", "ous", m_gt_0); + break; + case 'o': + stem(&z, "noitazi", "ize", m_gt_0) || + stem(&z, "noita", "ate", m_gt_0) || + stem(&z, "rota", "ate", m_gt_0); + break; + case 's': + stem(&z, "msila", "al", m_gt_0) || + stem(&z, "ssenevi", "ive", m_gt_0) || + stem(&z, "ssenluf", "ful", m_gt_0) || + stem(&z, "ssensuo", "ous", m_gt_0); + break; + case 't': + stem(&z, "itila", "al", m_gt_0) || + stem(&z, "itivi", "ive", m_gt_0) || + stem(&z, "itilib", "ble", m_gt_0); + break; + } + + /* Step 3 */ + switch( z[0] ){ + case 'e': + stem(&z, "etaci", "ic", m_gt_0) || + stem(&z, "evita", "", m_gt_0) || + stem(&z, "ezila", "al", m_gt_0); + break; + case 'i': + stem(&z, "itici", "ic", m_gt_0); + break; + case 'l': + stem(&z, "laci", "ic", m_gt_0) || + stem(&z, "luf", "", m_gt_0); + break; + case 's': + stem(&z, "ssen", "", m_gt_0); + break; + } + + /* Step 4 */ + switch( z[1] ){ + case 'a': + if( z[0]=='l' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'c': + if( z[0]=='e' && z[2]=='n' && (z[3]=='a' || z[3]=='e') && m_gt_1(z+4) ){ + z += 4; + } + break; + case 'e': + if( z[0]=='r' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'i': + if( z[0]=='c' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'l': + if( z[0]=='e' && z[2]=='b' && (z[3]=='a' || z[3]=='i') && m_gt_1(z+4) ){ + z += 4; + } + break; + case 'n': + if( z[0]=='t' ){ + if( z[2]=='a' ){ + if( m_gt_1(z+3) ){ + z += 3; + } + }else if( z[2]=='e' ){ + stem(&z, "tneme", "", m_gt_1) || + stem(&z, "tnem", "", m_gt_1) || + stem(&z, "tne", "", m_gt_1); + } + } + break; + case 'o': + if( z[0]=='u' ){ + if( m_gt_1(z+2) ){ + z += 2; + } + }else if( z[3]=='s' || z[3]=='t' ){ + stem(&z, "noi", "", m_gt_1); + } + break; + case 's': + if( z[0]=='m' && z[2]=='i' && m_gt_1(z+3) ){ + z += 3; + } + break; + case 't': + stem(&z, "eta", "", m_gt_1) || + stem(&z, "iti", "", m_gt_1); + break; + case 'u': + if( z[0]=='s' && z[2]=='o' && m_gt_1(z+3) ){ + z += 3; + } + break; + case 'v': + case 'z': + if( z[0]=='e' && z[2]=='i' && m_gt_1(z+3) ){ + z += 3; + } + break; + } + + /* Step 5a */ + if( z[0]=='e' ){ + if( m_gt_1(z+1) ){ + z++; + }else if( m_eq_1(z+1) && !star_oh(z+1) ){ + z++; + } + } + + /* Step 5b */ + if( m_gt_1(z) && z[0]=='l' && z[1]=='l' ){ + z++; + } + + /* z[] is now the stemmed word in reverse order. Flip it back + ** around into forward order and return. + */ + *pnOut = i = strlen(z); + zOut[i] = 0; + while( *z ){ + zOut[--i] = *(z++); + } +} + +/* +** Characters that can be part of a token. We assume any character +** whose value is greater than 0x80 (any UTF character) can be +** part of a token. In other words, delimiters all must have +** values of 0x7f or lower. 
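The m_gt_0(), m_eq_1() and m_gt_1() helpers above answer questions about the Porter measure m from the [C](VC){m}[V] decomposition, working on the reversed word. As a cross-check, here is an independent forward-order computation of m itself; the names are illustrative, input is assumed to be lowercase ASCII, and 'y' counts as a vowel only when it follows a consonant, matching the rule described above.

static int is_vowel_fwd(const char *z, int i){
  switch( z[i] ){
    case 'a': case 'e': case 'i': case 'o': case 'u': return 1;
    case 'y': return i>0 && !is_vowel_fwd(z, i-1);
    default:  return 0;
  }
}

static int porter_m(const char *z){
  int i = 0, m = 0;
  while( z[i] && !is_vowel_fwd(z, i) ) i++;       /* optional leading [C] */
  while( z[i] ){
    while( z[i] && is_vowel_fwd(z, i) ) i++;      /* a run of vowels ... */
    if( z[i]==0 ) break;
    while( z[i] && !is_vowel_fwd(z, i) ) i++;     /* ... then consonants: one VC */
    m++;
  }
  return m;  /* porter_m("tree")==0, porter_m("trouble")==1, porter_m("oaten")==2 */
}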
+*/ +static const char isIdChar[] = { +/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ +}; +#define idChar(C) (((ch=C)&0x80)!=0 || (ch>0x2f && isIdChar[ch-0x30])) +#define isDelim(C) (((ch=C)&0x80)==0 && (ch<0x30 || !isIdChar[ch-0x30])) + +/* +** Extract the next token from a tokenization cursor. The cursor must +** have been opened by a prior call to porterOpen(). +*/ +static int porterNext( + sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by porterOpen */ + const char **pzToken, /* OUT: *pzToken is the token text */ + int *pnBytes, /* OUT: Number of bytes in token */ + int *piStartOffset, /* OUT: Starting offset of token */ + int *piEndOffset, /* OUT: Ending offset of token */ + int *piPosition /* OUT: Position integer of token */ +){ + porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor; + const char *z = c->zInput; + + while( c->iOffsetnInput ){ + int iStartOffset, ch; + + /* Scan past delimiter characters */ + while( c->iOffsetnInput && isDelim(z[c->iOffset]) ){ + c->iOffset++; + } + + /* Count non-delimiter characters. */ + iStartOffset = c->iOffset; + while( c->iOffsetnInput && !isDelim(z[c->iOffset]) ){ + c->iOffset++; + } + + if( c->iOffset>iStartOffset ){ + int n = c->iOffset-iStartOffset; + if( n>c->nAllocated ){ + c->nAllocated = n+20; + c->zToken = realloc(c->zToken, c->nAllocated); + if( c->zToken==NULL ) return SQLITE_NOMEM; + } + porter_stemmer(&z[iStartOffset], n, c->zToken, pnBytes); + *pzToken = c->zToken; + *piStartOffset = iStartOffset; + *piEndOffset = c->iOffset; + *piPosition = c->iToken++; + return SQLITE_OK; + } + } + return SQLITE_DONE; +} + +/* +** The set of routines that implement the porter-stemmer tokenizer +*/ +static const sqlite3_tokenizer_module porterTokenizerModule = { + 0, + porterCreate, + porterDestroy, + porterOpen, + porterClose, + porterNext, +}; + +/* +** Allocate a new porter tokenizer. Return a pointer to the new +** tokenizer in *ppModule +*/ +void sqlite3Fts1PorterTokenizerModule( + sqlite3_tokenizer_module const**ppModule +){ + *ppModule = &porterTokenizerModule; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1) */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/fts1_tokenizer.h b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/fts1_tokenizer.h new file mode 100644 index 0000000000..a48cb74519 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/fts1_tokenizer.h @@ -0,0 +1,90 @@ +/* +** 2006 July 10 +** +** The author disclaims copyright to this source code. +** +************************************************************************* +** Defines the interface to tokenizers used by fulltext-search. There +** are three basic components: +** +** sqlite3_tokenizer_module is a singleton defining the tokenizer +** interface functions. This is essentially the class structure for +** tokenizers. +** +** sqlite3_tokenizer is used to define a particular tokenizer, perhaps +** including customization information defined at creation time. +** +** sqlite3_tokenizer_cursor is generated by a tokenizer to generate +** tokens from a particular input. +*/ +#ifndef _FTS1_TOKENIZER_H_ +#define _FTS1_TOKENIZER_H_ + +/* TODO(shess) Only used for SQLITE_OK and SQLITE_DONE at this time. 
+** If tokenizers are to be allowed to call sqlite3_*() functions, then +** we will need a way to register the API consistently. +*/ +#include "sqlite3.h" + +/* +** Structures used by the tokenizer interface. +*/ +typedef struct sqlite3_tokenizer sqlite3_tokenizer; +typedef struct sqlite3_tokenizer_cursor sqlite3_tokenizer_cursor; +typedef struct sqlite3_tokenizer_module sqlite3_tokenizer_module; + +struct sqlite3_tokenizer_module { + int iVersion; /* currently 0 */ + + /* + ** Create and destroy a tokenizer. argc/argv are passed down from + ** the fulltext virtual table creation to allow customization. + */ + int (*xCreate)(int argc, const char *const*argv, + sqlite3_tokenizer **ppTokenizer); + int (*xDestroy)(sqlite3_tokenizer *pTokenizer); + + /* + ** Tokenize a particular input. Call xOpen() to prepare to + ** tokenize, xNext() repeatedly until it returns SQLITE_DONE, then + ** xClose() to free any internal state. The pInput passed to + ** xOpen() must exist until the cursor is closed. The ppToken + ** result from xNext() is only valid until the next call to xNext() + ** or until xClose() is called. + */ + /* TODO(shess) current implementation requires pInput to be + ** nul-terminated. This should either be fixed, or pInput/nBytes + ** should be converted to zInput. + */ + int (*xOpen)(sqlite3_tokenizer *pTokenizer, + const char *pInput, int nBytes, + sqlite3_tokenizer_cursor **ppCursor); + int (*xClose)(sqlite3_tokenizer_cursor *pCursor); + int (*xNext)(sqlite3_tokenizer_cursor *pCursor, + const char **ppToken, int *pnBytes, + int *piStartOffset, int *piEndOffset, int *piPosition); +}; + +struct sqlite3_tokenizer { + const sqlite3_tokenizer_module *pModule; /* The module for this tokenizer */ + /* Tokenizer implementations will typically add additional fields */ +}; + +struct sqlite3_tokenizer_cursor { + sqlite3_tokenizer *pTokenizer; /* Tokenizer for this cursor. */ + /* Tokenizer implementations will typically add additional fields */ +}; + +/* +** Get the module for a tokenizer which generates tokens based on a +** set of non-token characters. The default is to break tokens at any +** non-alnum character, though the set of delimiters can also be +** specified by the first argv argument to xCreate(). +*/ +/* TODO(shess) This doesn't belong here. Need some sort of +** registration process. +*/ +void sqlite3Fts1SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule); +void sqlite3Fts1PorterTokenizerModule(sqlite3_tokenizer_module const**ppModule); + +#endif /* _FTS1_TOKENIZER_H_ */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/fts1_tokenizer1.c b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/fts1_tokenizer1.c new file mode 100644 index 0000000000..f58fba8f8e --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts1/fts1_tokenizer1.c @@ -0,0 +1,221 @@ +/* +** The author disclaims copyright to this source code. +** +************************************************************************* +** Implementation of the "simple" full-text-search tokenizer. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS1 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS1 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS1 is defined). 
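A sketch of how a caller drives the interface declared above, using the simple tokenizer (the porter module is driven the same way). The helper name is made up and error handling is abbreviated; the back-pointer assignments mirror what the fts1 core does after xCreate()/xOpen(), and the simple tokenizer's xNext() relies on pCursor->pTokenizer to find its delimiter table.

#include <stdio.h>
#include "fts1_tokenizer.h"

int dump_tokens(const char *zText){
  const sqlite3_tokenizer_module *pModule;
  sqlite3_tokenizer *pTokenizer = 0;
  sqlite3_tokenizer_cursor *pCursor = 0;
  const char *zToken;
  int nToken, iStart, iEnd, iPos, rc;

  sqlite3Fts1SimpleTokenizerModule(&pModule);
  rc = pModule->xCreate(0, 0, &pTokenizer);
  if( rc!=SQLITE_OK ) return rc;
  pTokenizer->pModule = pModule;

  rc = pModule->xOpen(pTokenizer, zText, -1, &pCursor);
  if( rc==SQLITE_OK ){
    pCursor->pTokenizer = pTokenizer;
    while( pModule->xNext(pCursor, &zToken, &nToken,
                          &iStart, &iEnd, &iPos)==SQLITE_OK ){
      printf("token %d: %.*s [%d..%d)\n", iPos, nToken, zToken, iStart, iEnd);
    }
    pModule->xClose(pCursor);
  }
  pModule->xDestroy(pTokenizer);
  return SQLITE_OK;
}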
+*/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1) + + +#include +#include +#include +#include +#include + +#include "fts1_tokenizer.h" + +typedef struct simple_tokenizer { + sqlite3_tokenizer base; + char delim[128]; /* flag ASCII delimiters */ +} simple_tokenizer; + +typedef struct simple_tokenizer_cursor { + sqlite3_tokenizer_cursor base; + const char *pInput; /* input we are tokenizing */ + int nBytes; /* size of the input */ + int iOffset; /* current position in pInput */ + int iToken; /* index of next token to be returned */ + char *pToken; /* storage for current token */ + int nTokenAllocated; /* space allocated to zToken buffer */ +} simple_tokenizer_cursor; + + +/* Forward declaration */ +static const sqlite3_tokenizer_module simpleTokenizerModule; + +static int isDelim(simple_tokenizer *t, unsigned char c){ + return c<0x80 && t->delim[c]; +} + +/* +** Create a new tokenizer instance. +*/ +static int simpleCreate( + int argc, const char * const *argv, + sqlite3_tokenizer **ppTokenizer +){ + simple_tokenizer *t; + + t = (simple_tokenizer *) calloc(sizeof(*t), 1); + if( t==NULL ) return SQLITE_NOMEM; + + /* TODO(shess) Delimiters need to remain the same from run to run, + ** else we need to reindex. One solution would be a meta-table to + ** track such information in the database, then we'd only want this + ** information on the initial create. + */ + if( argc>1 ){ + int i, n = strlen(argv[1]); + for(i=0; i=0x80 ){ + free(t); + return SQLITE_ERROR; + } + t->delim[ch] = 1; + } + } else { + /* Mark non-alphanumeric ASCII characters as delimiters */ + int i; + for(i=1; i<0x80; i++){ + t->delim[i] = !isalnum(i); + } + } + + *ppTokenizer = &t->base; + return SQLITE_OK; +} + +/* +** Destroy a tokenizer +*/ +static int simpleDestroy(sqlite3_tokenizer *pTokenizer){ + free(pTokenizer); + return SQLITE_OK; +} + +/* +** Prepare to begin tokenizing a particular string. The input +** string to be tokenized is pInput[0..nBytes-1]. A cursor +** used to incrementally tokenize this string is returned in +** *ppCursor. +*/ +static int simpleOpen( + sqlite3_tokenizer *pTokenizer, /* The tokenizer */ + const char *pInput, int nBytes, /* String to be tokenized */ + sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ +){ + simple_tokenizer_cursor *c; + + c = (simple_tokenizer_cursor *) malloc(sizeof(*c)); + if( c==NULL ) return SQLITE_NOMEM; + + c->pInput = pInput; + if( pInput==0 ){ + c->nBytes = 0; + }else if( nBytes<0 ){ + c->nBytes = (int)strlen(pInput); + }else{ + c->nBytes = nBytes; + } + c->iOffset = 0; /* start tokenizing at the beginning */ + c->iToken = 0; + c->pToken = NULL; /* no space allocated, yet. */ + c->nTokenAllocated = 0; + + *ppCursor = &c->base; + return SQLITE_OK; +} + +/* +** Close a tokenization cursor previously opened by a call to +** simpleOpen() above. +*/ +static int simpleClose(sqlite3_tokenizer_cursor *pCursor){ + simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; + free(c->pToken); + free(c); + return SQLITE_OK; +} + +/* +** Extract the next token from a tokenization cursor. The cursor must +** have been opened by a prior call to simpleOpen(). 
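One consequence of simpleCreate() above: when an argument is supplied, only the listed ASCII characters act as delimiters, so the usual whitespace splitting disappears unless those characters are listed too. A hypothetical direct use that splits on commas only; argv[0] is assumed to carry the tokenizer name, matching how the argument vector is indexed above.

#include "fts1_tokenizer.h"

int make_csv_tokenizer(sqlite3_tokenizer **ppTokenizer){
  static const char *const azArg[] = { "simple", "," };
  const sqlite3_tokenizer_module *pModule;

  sqlite3Fts1SimpleTokenizerModule(&pModule);
  /* With argv[1]=",", a comma is the only delimiter: spaces and other
  ** punctuation stay inside tokens. */
  return pModule->xCreate(2, azArg, ppTokenizer);
}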
+*/ +static int simpleNext( + sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by simpleOpen */ + const char **ppToken, /* OUT: *ppToken is the token text */ + int *pnBytes, /* OUT: Number of bytes in token */ + int *piStartOffset, /* OUT: Starting offset of token */ + int *piEndOffset, /* OUT: Ending offset of token */ + int *piPosition /* OUT: Position integer of token */ +){ + simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; + simple_tokenizer *t = (simple_tokenizer *) pCursor->pTokenizer; + unsigned char *p = (unsigned char *)c->pInput; + + while( c->iOffsetnBytes ){ + int iStartOffset; + + /* Scan past delimiter characters */ + while( c->iOffsetnBytes && isDelim(t, p[c->iOffset]) ){ + c->iOffset++; + } + + /* Count non-delimiter characters. */ + iStartOffset = c->iOffset; + while( c->iOffsetnBytes && !isDelim(t, p[c->iOffset]) ){ + c->iOffset++; + } + + if( c->iOffset>iStartOffset ){ + int i, n = c->iOffset-iStartOffset; + if( n>c->nTokenAllocated ){ + c->nTokenAllocated = n+20; + c->pToken = realloc(c->pToken, c->nTokenAllocated); + if( c->pToken==NULL ) return SQLITE_NOMEM; + } + for(i=0; ipToken[i] = ch<0x80 ? tolower(ch) : ch; + } + *ppToken = c->pToken; + *pnBytes = n; + *piStartOffset = iStartOffset; + *piEndOffset = c->iOffset; + *piPosition = c->iToken++; + + return SQLITE_OK; + } + } + return SQLITE_DONE; +} + +/* +** The set of routines that implement the simple tokenizer +*/ +static const sqlite3_tokenizer_module simpleTokenizerModule = { + 0, + simpleCreate, + simpleDestroy, + simpleOpen, + simpleClose, + simpleNext, +}; + +/* +** Allocate a new simple tokenizer. Return a pointer to the new +** tokenizer in *ppModule +*/ +void sqlite3Fts1SimpleTokenizerModule( + sqlite3_tokenizer_module const**ppModule +){ + *ppModule = &simpleTokenizerModule; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1) */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/README.tokenizers b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/README.tokenizers new file mode 100644 index 0000000000..6625b316b1 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/README.tokenizers @@ -0,0 +1,134 @@ + +1. FTS2 Tokenizers + + When creating a new full-text table, FTS2 allows the user to select + the text tokenizer implementation to be used when indexing text + by specifying a "tokenizer" clause as part of the CREATE VIRTUAL TABLE + statement: + + CREATE VIRTUAL TABLE USING fts2( + [, tokenizer []] + ); + + The built-in tokenizers (valid values to pass as ) are + "simple" and "porter". + + should consist of zero or more white-space separated + arguments to pass to the selected tokenizer implementation. The + interpretation of the arguments, if any, depends on the individual + tokenizer. + +2. Custom Tokenizers + + FTS2 allows users to provide custom tokenizer implementations. The + interface used to create a new tokenizer is defined and described in + the fts2_tokenizer.h source file. + + Registering a new FTS2 tokenizer is similar to registering a new + virtual table module with SQLite. The user passes a pointer to a + structure containing pointers to various callback functions that + make up the implementation of the new tokenizer type. For tokenizers, + the structure (defined in fts2_tokenizer.h) is called + "sqlite3_tokenizer_module". + + FTS2 does not expose a C-function that users call to register new + tokenizer types with a database handle. 
Instead, the pointer must + be encoded as an SQL blob value and passed to FTS2 through the SQL + engine by evaluating a special scalar function, "fts2_tokenizer()". + The fts2_tokenizer() function may be called with one or two arguments, + as follows: + + SELECT fts2_tokenizer(); + SELECT fts2_tokenizer(, ); + + Where is a string identifying the tokenizer and + is a pointer to an sqlite3_tokenizer_module + structure encoded as an SQL blob. If the second argument is present, + it is registered as tokenizer and a copy of it + returned. If only one argument is passed, a pointer to the tokenizer + implementation currently registered as is returned, + encoded as a blob. Or, if no such tokenizer exists, an SQL exception + (error) is raised. + + SECURITY: If the fts2 extension is used in an environment where potentially + malicious users may execute arbitrary SQL (i.e. gears), they should be + prevented from invoking the fts2_tokenizer() function, possibly using the + authorisation callback. + + See "Sample code" below for an example of calling the fts2_tokenizer() + function from C code. + +3. ICU Library Tokenizers + + If this extension is compiled with the SQLITE_ENABLE_ICU pre-processor + symbol defined, then there exists a built-in tokenizer named "icu" + implemented using the ICU library. The first argument passed to the + xCreate() method (see fts2_tokenizer.h) of this tokenizer may be + an ICU locale identifier. For example "tr_TR" for Turkish as used + in Turkey, or "en_AU" for English as used in Australia. For example: + + "CREATE VIRTUAL TABLE thai_text USING fts2(text, tokenizer icu th_TH)" + + The ICU tokenizer implementation is very simple. It splits the input + text according to the ICU rules for finding word boundaries and discards + any tokens that consist entirely of white-space. This may be suitable + for some applications in some locales, but not all. If more complex + processing is required, for example to implement stemming or + discard punctuation, this can be done by creating a tokenizer + implementation that uses the ICU tokenizer as part of it's implementation. + + When using the ICU tokenizer this way, it is safe to overwrite the + contents of the strings returned by the xNext() method (see + fts2_tokenizer.h). + +4. Sample code. 
+ + The following two code samples illustrate the way C code should invoke + the fts2_tokenizer() scalar function: + + int registerTokenizer( + sqlite3 *db, + char *zName, + const sqlite3_tokenizer_module *p + ){ + int rc; + sqlite3_stmt *pStmt; + const char zSql[] = "SELECT fts2_tokenizer(?, ?)"; + + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + + sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); + sqlite3_bind_blob(pStmt, 2, &p, sizeof(p), SQLITE_STATIC); + sqlite3_step(pStmt); + + return sqlite3_finalize(pStmt); + } + + int queryTokenizer( + sqlite3 *db, + char *zName, + const sqlite3_tokenizer_module **pp + ){ + int rc; + sqlite3_stmt *pStmt; + const char zSql[] = "SELECT fts2_tokenizer(?)"; + + *pp = 0; + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + + sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); + if( SQLITE_ROW==sqlite3_step(pStmt) ){ + if( sqlite3_column_type(pStmt, 0)==SQLITE_BLOB ){ + memcpy(pp, sqlite3_column_blob(pStmt, 0), sizeof(*pp)); + } + } + + return sqlite3_finalize(pStmt); + } + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/README.txt b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/README.txt new file mode 100644 index 0000000000..517a2a0434 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/README.txt @@ -0,0 +1,4 @@ +This folder contains source code to the second full-text search +extension for SQLite. While the API is the same, this version uses a +substantially different storage schema from fts1, so tables will need +to be rebuilt. diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2.c b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2.c new file mode 100644 index 0000000000..65ad173abc --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2.c @@ -0,0 +1,5936 @@ +/* fts2 has a design flaw which can lead to database corruption (see +** below). It is recommended not to use it any longer, instead use +** fts3 (or higher). If you believe that your use of fts2 is safe, +** add -DSQLITE_ENABLE_BROKEN_FTS2=1 to your CFLAGS. +*/ +#ifndef SQLITE_ENABLE_BROKEN_FTS2 +#error fts2 has a design flaw and has been deprecated. +#endif +/* The flaw is that fts2 uses the content table's unaliased rowid as +** the unique docid. fts2 embeds the rowid in the index it builds, +** and expects the rowid to not change. The SQLite VACUUM operation +** will renumber such rowids, thereby breaking fts2. If you are using +** fts2 in a system which has disabled VACUUM, then you can continue +** to use it safely. Note that PRAGMA auto_vacuum does NOT disable +** VACUUM, though systems using auto_vacuum are unlikely to invoke +** VACUUM. +** +** Unlike fts1, which is safe across VACUUM if you never delete +** documents, fts2 has a second exposure to this flaw, in the segments +** table. So fts2 should be considered unsafe across VACUUM in all +** cases. +*/ + +/* +** 2006 Oct 10 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This is an SQLite module implementing full-text search. 
+*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS2 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS2 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS2 is defined). +*/ + +/* TODO(shess) Consider exporting this comment to an HTML file or the +** wiki. +*/ +/* The full-text index is stored in a series of b+tree (-like) +** structures called segments which map terms to doclists. The +** structures are like b+trees in layout, but are constructed from the +** bottom up in optimal fashion and are not updatable. Since trees +** are built from the bottom up, things will be described from the +** bottom up. +** +** +**** Varints **** +** The basic unit of encoding is a variable-length integer called a +** varint. We encode variable-length integers in little-endian order +** using seven bits * per byte as follows: +** +** KEY: +** A = 0xxxxxxx 7 bits of data and one flag bit +** B = 1xxxxxxx 7 bits of data and one flag bit +** +** 7 bits - A +** 14 bits - BA +** 21 bits - BBA +** and so on. +** +** This is identical to how sqlite encodes varints (see util.c). +** +** +**** Document lists **** +** A doclist (document list) holds a docid-sorted list of hits for a +** given term. Doclists hold docids, and can optionally associate +** token positions and offsets with docids. +** +** A DL_POSITIONS_OFFSETS doclist is stored like this: +** +** array { +** varint docid; +** array { (position list for column 0) +** varint position; (delta from previous position plus POS_BASE) +** varint startOffset; (delta from previous startOffset) +** varint endOffset; (delta from startOffset) +** } +** array { +** varint POS_COLUMN; (marks start of position list for new column) +** varint column; (index of new column) +** array { +** varint position; (delta from previous position plus POS_BASE) +** varint startOffset;(delta from previous startOffset) +** varint endOffset; (delta from startOffset) +** } +** } +** varint POS_END; (marks end of positions for this document. +** } +** +** Here, array { X } means zero or more occurrences of X, adjacent in +** memory. A "position" is an index of a token in the token stream +** generated by the tokenizer, while an "offset" is a byte offset, +** both based at 0. Note that POS_END and POS_COLUMN occur in the +** same logical place as the position element, and act as sentinals +** ending a position list array. +** +** A DL_POSITIONS doclist omits the startOffset and endOffset +** information. A DL_DOCIDS doclist omits both the position and +** offset information, becoming an array of varint-encoded docids. +** +** On-disk data is stored as type DL_DEFAULT, so we don't serialize +** the type. Due to how deletion is implemented in the segmentation +** system, on-disk doclists MUST store at least positions. +** +** +**** Segment leaf nodes **** +** Segment leaf nodes store terms and doclists, ordered by term. Leaf +** nodes are written using LeafWriter, and read using LeafReader (to +** iterate through a single leaf node's data) and LeavesReader (to +** iterate through a segment's entire leaf layer). 
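A stand-alone encoder/decoder matching the varint format described earlier in this comment (little-endian, seven data bits per byte, high bit set on every byte except the last). fts2.c has its own putVarint()/getVarint(); this sketch, with made-up names, is only meant to make the byte layout concrete.

#include <assert.h>

typedef unsigned long long u64;

static int put_varint(unsigned char *p, u64 v){
  int n = 0;
  do{
    p[n++] = (unsigned char)((v & 0x7f) | 0x80);
    v >>= 7;
  }while( v!=0 );
  p[n-1] &= 0x7f;            /* clear the flag bit on the final byte */
  return n;                  /* bytes written, 1..10 for a 64-bit value */
}

static int get_varint(const unsigned char *p, u64 *pVal){
  u64 v = 0;
  int n = 0;
  do{
    v |= (u64)(p[n] & 0x7f) << (7*n);
  }while( p[n++] & 0x80 );
  *pVal = v;
  return n;
}

void varint_demo(void){
  unsigned char buf[10];
  u64 v;
  int n = put_varint(buf, 300);
  assert( n==2 && buf[0]==0xac && buf[1]==0x02 );   /* 300 = 0x2c | (2<<7) */
  assert( get_varint(buf, &v)==2 && v==300 );
}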
Leaf nodes have +** the format: +** +** varint iHeight; (height from leaf level, always 0) +** varint nTerm; (length of first term) +** char pTerm[nTerm]; (content of first term) +** varint nDoclist; (length of term's associated doclist) +** char pDoclist[nDoclist]; (content of doclist) +** array { +** (further terms are delta-encoded) +** varint nPrefix; (length of prefix shared with previous term) +** varint nSuffix; (length of unshared suffix) +** char pTermSuffix[nSuffix];(unshared suffix of next term) +** varint nDoclist; (length of term's associated doclist) +** char pDoclist[nDoclist]; (content of doclist) +** } +** +** Here, array { X } means zero or more occurrences of X, adjacent in +** memory. +** +** Leaf nodes are broken into blocks which are stored contiguously in +** the %_segments table in sorted order. This means that when the end +** of a node is reached, the next term is in the node with the next +** greater node id. +** +** New data is spilled to a new leaf node when the current node +** exceeds LEAF_MAX bytes (default 2048). New data which itself is +** larger than STANDALONE_MIN (default 1024) is placed in a standalone +** node (a leaf node with a single term and doclist). The goal of +** these settings is to pack together groups of small doclists while +** making it efficient to directly access large doclists. The +** assumption is that large doclists represent terms which are more +** likely to be query targets. +** +** TODO(shess) It may be useful for blocking decisions to be more +** dynamic. For instance, it may make more sense to have a 2.5k leaf +** node rather than splitting into 2k and .5k nodes. My intuition is +** that this might extend through 2x or 4x the pagesize. +** +** +**** Segment interior nodes **** +** Segment interior nodes store blockids for subtree nodes and terms +** to describe what data is stored by the each subtree. Interior +** nodes are written using InteriorWriter, and read using +** InteriorReader. InteriorWriters are created as needed when +** SegmentWriter creates new leaf nodes, or when an interior node +** itself grows too big and must be split. The format of interior +** nodes: +** +** varint iHeight; (height from leaf level, always >0) +** varint iBlockid; (block id of node's leftmost subtree) +** optional { +** varint nTerm; (length of first term) +** char pTerm[nTerm]; (content of first term) +** array { +** (further terms are delta-encoded) +** varint nPrefix; (length of shared prefix with previous term) +** varint nSuffix; (length of unshared suffix) +** char pTermSuffix[nSuffix]; (unshared suffix of next term) +** } +** } +** +** Here, optional { X } means an optional element, while array { X } +** means zero or more occurrences of X, adjacent in memory. +** +** An interior node encodes n terms separating n+1 subtrees. The +** subtree blocks are contiguous, so only the first subtree's blockid +** is encoded. The subtree at iBlockid will contain all terms less +** than the first term encoded (or all terms if no term is encoded). +** Otherwise, for terms greater than or equal to pTerm[i] but less +** than pTerm[i+1], the subtree for that term will be rooted at +** iBlockid+i. Interior nodes only store enough term data to +** distinguish adjacent children (if the rightmost term of the left +** child is "something", and the leftmost term of the right child is +** "wicked", only "w" is stored). +** +** New data is spilled to a new interior node at the same height when +** the current node exceeds INTERIOR_MAX bytes (default 2048). 
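To make the term delta-encoding concrete: each term after the first is stored as a shared-prefix length plus the unshared suffix, relative to the previous (sorted) term. A small illustration with made-up names; the real writers also emit the lengths as varints.

#include <stdio.h>

static int shared_prefix(const char *zPrev, const char *zTerm){
  int n = 0;
  while( zPrev[n] && zPrev[n]==zTerm[n] ) n++;
  return n;
}

/* For the sorted terms "linger", "linguist", "link" this prints:
**   linguist: nPrefix=4 suffix="uist"
**   link: nPrefix=3 suffix="k"
*/
void show_term_encoding(const char **azTerm, int nTerm){
  int i;
  for(i=1; i<nTerm; i++){
    int nPrefix = shared_prefix(azTerm[i-1], azTerm[i]);
    printf("%s: nPrefix=%d suffix=\"%s\"\n",
           azTerm[i], nPrefix, azTerm[i]+nPrefix);
  }
}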
+** INTERIOR_MIN_TERMS (default 7) keeps large terms from monopolizing +** interior nodes and making the tree too skinny. The interior nodes +** at a given height are naturally tracked by interior nodes at +** height+1, and so on. +** +** +**** Segment directory **** +** The segment directory in table %_segdir stores meta-information for +** merging and deleting segments, and also the root node of the +** segment's tree. +** +** The root node is the top node of the segment's tree after encoding +** the entire segment, restricted to ROOT_MAX bytes (default 1024). +** This could be either a leaf node or an interior node. If the top +** node requires more than ROOT_MAX bytes, it is flushed to %_segments +** and a new root interior node is generated (which should always fit +** within ROOT_MAX because it only needs space for 2 varints, the +** height and the blockid of the previous root). +** +** The meta-information in the segment directory is: +** level - segment level (see below) +** idx - index within level +** - (level,idx uniquely identify a segment) +** start_block - first leaf node +** leaves_end_block - last leaf node +** end_block - last block (including interior nodes) +** root - contents of root node +** +** If the root node is a leaf node, then start_block, +** leaves_end_block, and end_block are all 0. +** +** +**** Segment merging **** +** To amortize update costs, segments are groups into levels and +** merged in matches. Each increase in level represents exponentially +** more documents. +** +** New documents (actually, document updates) are tokenized and +** written individually (using LeafWriter) to a level 0 segment, with +** incrementing idx. When idx reaches MERGE_COUNT (default 16), all +** level 0 segments are merged into a single level 1 segment. Level 1 +** is populated like level 0, and eventually MERGE_COUNT level 1 +** segments are merged to a single level 2 segment (representing +** MERGE_COUNT^2 updates), and so on. +** +** A segment merge traverses all segments at a given level in +** parallel, performing a straightforward sorted merge. Since segment +** leaf nodes are written in to the %_segments table in order, this +** merge traverses the underlying sqlite disk structures efficiently. +** After the merge, all segment blocks from the merged level are +** deleted. +** +** MERGE_COUNT controls how often we merge segments. 16 seems to be +** somewhat of a sweet spot for insertion performance. 32 and 64 show +** very similar performance numbers to 16 on insertion, though they're +** a tiny bit slower (perhaps due to more overhead in merge-time +** sorting). 8 is about 20% slower than 16, 4 about 50% slower than +** 16, 2 about 66% slower than 16. +** +** At query time, high MERGE_COUNT increases the number of segments +** which need to be scanned and merged. For instance, with 100k docs +** inserted: +** +** MERGE_COUNT segments +** 16 25 +** 8 12 +** 4 10 +** 2 6 +** +** This appears to have only a moderate impact on queries for very +** frequent terms (which are somewhat dominated by segment merge +** costs), and infrequent and non-existent terms still seem to be fast +** even with many segments. +** +** TODO(shess) That said, it would be nice to have a better query-side +** argument for MERGE_COUNT of 16. Also, it's possible/likely that +** optimizations to things like doclist merging will swing the sweet +** spot around. 
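+**
+** A rough way to see where those segment counts come from: each level
+** accumulates up to MERGE_COUNT segments before they are merged up a
+** level, so after D document updates the number of live segments is
+** approximately the sum of D's digits written in base MERGE_COUNT.
+** For D=100k that gives 1+8+6+10+0 = 25 segments at MERGE_COUNT=16,
+** and the popcount of 100k (6) at MERGE_COUNT=2, matching the table
+** above.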
+**
+**
+**
+**** Handling of deletions and updates ****
+** Since we're using a segmented structure, with no docid-oriented
+** index into the term index, we clearly cannot simply update the term
+** index when a document is deleted or updated.  For deletions, we
+** write an empty doclist (varint(docid) varint(POS_END)), for updates
+** we simply write the new doclist.  Segment merges overwrite older
+** data for a particular docid with newer data, so deletes or updates
+** will eventually overtake the earlier data and knock it out.  The
+** query logic likewise merges doclists so that newer data knocks out
+** older data.
+**
+** TODO(shess) Provide a VACUUM type operation to clear out all
+** deletions and duplications.  This would basically be a forced merge
+** into a single segment.
+*/
+
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2)
+
+#if defined(SQLITE_ENABLE_FTS2) && !defined(SQLITE_CORE)
+# define SQLITE_CORE 1
+#endif
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <ctype.h>
+
+#include "fts2.h"
+#include "fts2_hash.h"
+#include "fts2_tokenizer.h"
+#include "sqlite3.h"
+#include "sqlite3ext.h"
+SQLITE_EXTENSION_INIT1
+
+
+/* TODO(shess) MAN, this thing needs some refactoring.  At minimum, it
+** would be nice to order the file better, perhaps something along the
+** lines of:
+**
+** - utility functions
+** - table setup functions
+** - table update functions
+** - table query functions
+**
+** Put the query functions last because they're likely to reference
+** typedefs or functions from the table update section.
+*/
+
+#if 0
+# define TRACE(A)  printf A; fflush(stdout)
+#else
+# define TRACE(A)
+#endif
+
+/* It is not safe to call isspace(), tolower(), or isalnum() on
+** hi-bit-set characters.  This is the same solution used in the
+** tokenizer.
+*/
+/* TODO(shess) The snippet-generation code should be using the
+** tokenizer-generated tokens rather than doing its own local
+** tokenization.
+*/
+/* TODO(shess) Is __isascii() a portable version of (c&0x80)==0? */
+static int safe_isspace(char c){
+  return (c&0x80)==0 ? isspace(c) : 0;
+}
+static int safe_tolower(char c){
+  return (c&0x80)==0 ? tolower(c) : c;
+}
+static int safe_isalnum(char c){
+  return (c&0x80)==0 ? isalnum(c) : 0;
+}
+
+typedef enum DocListType {
+  DL_DOCIDS,              /* docids only */
+  DL_POSITIONS,           /* docids + positions */
+  DL_POSITIONS_OFFSETS    /* docids + positions + offsets */
+} DocListType;
+
+/*
+** By default, only positions and not offsets are stored in the doclists.
+** To change this so that offsets are stored too, compile with
+**
+**          -DDL_DEFAULT=DL_POSITIONS_OFFSETS
+**
+** If DL_DEFAULT is set to DL_DOCIDS, your table can only be inserted
+** into (no deletes or updates).
+*/
+#ifndef DL_DEFAULT
+# define DL_DEFAULT DL_POSITIONS
+#endif
+
+enum {
+  POS_END = 0,        /* end of this position list */
+  POS_COLUMN,         /* followed by new column number */
+  POS_BASE
+};
+
+/* MERGE_COUNT controls how often we merge segments (see comment at
+** top of file).
+*/
+#define MERGE_COUNT 16
+
+/* utility functions */
+
+/* CLEAR() and SCRAMBLE() abstract memset() on a pointer to a single
+** record to prevent errors of the form:
+**
+** my_function(SomeType *b){
+**   memset(b, '\0', sizeof(b)); // sizeof(b)!=sizeof(*b)
+** }
+*/
+/* TODO(shess) Obvious candidates for a header file. */
+#define CLEAR(b) memset(b, '\0', sizeof(*(b)))
+
+#ifndef NDEBUG
+# define SCRAMBLE(b) memset(b, 0x55, sizeof(*(b)))
+#else
+# define SCRAMBLE(b)
+#endif
+
+/* We may need up to VARINT_MAX bytes to store an encoded 64-bit integer.
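+** (A 64-bit value needs at most ceil(64/7) = 10 groups of 7 payload
+** bits, hence 10.  For example, the value 300 (0x12C) is encoded by
+** putVarint() below as the two bytes 0xAC 0x02: low-order seven bits
+** first, with the high bit set on every byte except the last.)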
*/ +#define VARINT_MAX 10 + +/* Write a 64-bit variable-length integer to memory starting at p[0]. + * The length of data written will be between 1 and VARINT_MAX bytes. + * The number of bytes written is returned. */ +static int putVarint(char *p, sqlite_int64 v){ + unsigned char *q = (unsigned char *) p; + sqlite_uint64 vu = v; + do{ + *q++ = (unsigned char) ((vu & 0x7f) | 0x80); + vu >>= 7; + }while( vu!=0 ); + q[-1] &= 0x7f; /* turn off high bit in final byte */ + assert( q - (unsigned char *)p <= VARINT_MAX ); + return (int) (q - (unsigned char *)p); +} + +/* Read a 64-bit variable-length integer from memory starting at p[0]. + * Return the number of bytes read, or 0 on error. + * The value is stored in *v. */ +static int getVarint(const char *p, sqlite_int64 *v){ + const unsigned char *q = (const unsigned char *) p; + sqlite_uint64 x = 0, y = 1; + while( (*q & 0x80) == 0x80 ){ + x += y * (*q++ & 0x7f); + y <<= 7; + if( q - (unsigned char *)p >= VARINT_MAX ){ /* bad data */ + assert( 0 ); + return 0; + } + } + x += y * (*q++); + *v = (sqlite_int64) x; + return (int) (q - (unsigned char *)p); +} + +static int getVarint32(const char *p, int *pi){ + sqlite_int64 i; + int ret = getVarint(p, &i); + *pi = (int) i; + assert( *pi==i ); + return ret; +} + +/*******************************************************************/ +/* DataBuffer is used to collect data into a buffer in piecemeal +** fashion. It implements the usual distinction between amount of +** data currently stored (nData) and buffer capacity (nCapacity). +** +** dataBufferInit - create a buffer with given initial capacity. +** dataBufferReset - forget buffer's data, retaining capacity. +** dataBufferDestroy - free buffer's data. +** dataBufferExpand - expand capacity without adding data. +** dataBufferAppend - append data. +** dataBufferAppend2 - append two pieces of data at once. +** dataBufferReplace - replace buffer's data. +*/ +typedef struct DataBuffer { + char *pData; /* Pointer to malloc'ed buffer. */ + int nCapacity; /* Size of pData buffer. */ + int nData; /* End of data loaded into pData. */ +} DataBuffer; + +static void dataBufferInit(DataBuffer *pBuffer, int nCapacity){ + assert( nCapacity>=0 ); + pBuffer->nData = 0; + pBuffer->nCapacity = nCapacity; + pBuffer->pData = nCapacity==0 ? NULL : malloc(nCapacity); +} +static void dataBufferReset(DataBuffer *pBuffer){ + pBuffer->nData = 0; +} +static void dataBufferDestroy(DataBuffer *pBuffer){ + if( pBuffer->pData!=NULL ) free(pBuffer->pData); + SCRAMBLE(pBuffer); +} +static void dataBufferExpand(DataBuffer *pBuffer, int nAddCapacity){ + assert( nAddCapacity>0 ); + /* TODO(shess) Consider expanding more aggressively. Note that the + ** underlying malloc implementation may take care of such things for + ** us already. 
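+  ** A geometric growth policy would look something like (sketch only):
+  **
+  **   if( pBuffer->nData+nAddCapacity>pBuffer->nCapacity ){
+  **     int n = pBuffer->nCapacity ? pBuffer->nCapacity*2 : 16;
+  **     while( n<pBuffer->nData+nAddCapacity ) n *= 2;
+  **     pBuffer->pData = realloc(pBuffer->pData, n);
+  **     pBuffer->nCapacity = n;
+  **   }
+  **
+  ** trading some slack space for fewer realloc() calls.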
+ */ + if( pBuffer->nData+nAddCapacity>pBuffer->nCapacity ){ + pBuffer->nCapacity = pBuffer->nData+nAddCapacity; + pBuffer->pData = realloc(pBuffer->pData, pBuffer->nCapacity); + } +} +static void dataBufferAppend(DataBuffer *pBuffer, + const char *pSource, int nSource){ + assert( nSource>0 && pSource!=NULL ); + dataBufferExpand(pBuffer, nSource); + memcpy(pBuffer->pData+pBuffer->nData, pSource, nSource); + pBuffer->nData += nSource; +} +static void dataBufferAppend2(DataBuffer *pBuffer, + const char *pSource1, int nSource1, + const char *pSource2, int nSource2){ + assert( nSource1>0 && pSource1!=NULL ); + assert( nSource2>0 && pSource2!=NULL ); + dataBufferExpand(pBuffer, nSource1+nSource2); + memcpy(pBuffer->pData+pBuffer->nData, pSource1, nSource1); + memcpy(pBuffer->pData+pBuffer->nData+nSource1, pSource2, nSource2); + pBuffer->nData += nSource1+nSource2; +} +static void dataBufferReplace(DataBuffer *pBuffer, + const char *pSource, int nSource){ + dataBufferReset(pBuffer); + dataBufferAppend(pBuffer, pSource, nSource); +} + +/* StringBuffer is a null-terminated version of DataBuffer. */ +typedef struct StringBuffer { + DataBuffer b; /* Includes null terminator. */ +} StringBuffer; + +static void initStringBuffer(StringBuffer *sb){ + dataBufferInit(&sb->b, 100); + dataBufferReplace(&sb->b, "", 1); +} +static int stringBufferLength(StringBuffer *sb){ + return sb->b.nData-1; +} +static char *stringBufferData(StringBuffer *sb){ + return sb->b.pData; +} +static void stringBufferDestroy(StringBuffer *sb){ + dataBufferDestroy(&sb->b); +} + +static void nappend(StringBuffer *sb, const char *zFrom, int nFrom){ + assert( sb->b.nData>0 ); + if( nFrom>0 ){ + sb->b.nData--; + dataBufferAppend2(&sb->b, zFrom, nFrom, "", 1); + } +} +static void append(StringBuffer *sb, const char *zFrom){ + nappend(sb, zFrom, strlen(zFrom)); +} + +/* Append a list of strings separated by commas. */ +static void appendList(StringBuffer *sb, int nString, char **azString){ + int i; + for(i=0; i0 ) append(sb, ", "); + append(sb, azString[i]); + } +} + +static int endsInWhiteSpace(StringBuffer *p){ + return stringBufferLength(p)>0 && + safe_isspace(stringBufferData(p)[stringBufferLength(p)-1]); +} + +/* If the StringBuffer ends in something other than white space, add a +** single space character to the end. +*/ +static void appendWhiteSpace(StringBuffer *p){ + if( stringBufferLength(p)==0 ) return; + if( !endsInWhiteSpace(p) ) append(p, " "); +} + +/* Remove white space from the end of the StringBuffer */ +static void trimWhiteSpace(StringBuffer *p){ + while( endsInWhiteSpace(p) ){ + p->b.pData[--p->b.nData-1] = '\0'; + } +} + +/*******************************************************************/ +/* DLReader is used to read document elements from a doclist. The +** current docid is cached, so dlrDocid() is fast. DLReader does not +** own the doclist buffer. +** +** dlrAtEnd - true if there's no more data to read. +** dlrDocid - docid of current document. +** dlrDocData - doclist data for current document (including docid). +** dlrDocDataBytes - length of same. +** dlrAllDataBytes - length of all remaining data. +** dlrPosData - position data for current document. +** dlrPosDataLen - length of pos data for current document (incl POS_END). +** dlrStep - step to current document. +** dlrInit - initial for doclist of given type against given data. +** dlrDestroy - clean up. 
+** +** Expected usage is something like: +** +** DLReader reader; +** dlrInit(&reader, pData, nData); +** while( !dlrAtEnd(&reader) ){ +** // calls to dlrDocid() and kin. +** dlrStep(&reader); +** } +** dlrDestroy(&reader); +*/ +typedef struct DLReader { + DocListType iType; + const char *pData; + int nData; + + sqlite_int64 iDocid; + int nElement; +} DLReader; + +static int dlrAtEnd(DLReader *pReader){ + assert( pReader->nData>=0 ); + return pReader->nData==0; +} +static sqlite_int64 dlrDocid(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + return pReader->iDocid; +} +static const char *dlrDocData(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + return pReader->pData; +} +static int dlrDocDataBytes(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + return pReader->nElement; +} +static int dlrAllDataBytes(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + return pReader->nData; +} +/* TODO(shess) Consider adding a field to track iDocid varint length +** to make these two functions faster. This might matter (a tiny bit) +** for queries. +*/ +static const char *dlrPosData(DLReader *pReader){ + sqlite_int64 iDummy; + int n = getVarint(pReader->pData, &iDummy); + assert( !dlrAtEnd(pReader) ); + return pReader->pData+n; +} +static int dlrPosDataLen(DLReader *pReader){ + sqlite_int64 iDummy; + int n = getVarint(pReader->pData, &iDummy); + assert( !dlrAtEnd(pReader) ); + return pReader->nElement-n; +} +static void dlrStep(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + + /* Skip past current doclist element. */ + assert( pReader->nElement<=pReader->nData ); + pReader->pData += pReader->nElement; + pReader->nData -= pReader->nElement; + + /* If there is more data, read the next doclist element. */ + if( pReader->nData!=0 ){ + sqlite_int64 iDocidDelta; + int iDummy, n = getVarint(pReader->pData, &iDocidDelta); + pReader->iDocid += iDocidDelta; + if( pReader->iType>=DL_POSITIONS ){ + assert( nnData ); + while( 1 ){ + n += getVarint32(pReader->pData+n, &iDummy); + assert( n<=pReader->nData ); + if( iDummy==POS_END ) break; + if( iDummy==POS_COLUMN ){ + n += getVarint32(pReader->pData+n, &iDummy); + assert( nnData ); + }else if( pReader->iType==DL_POSITIONS_OFFSETS ){ + n += getVarint32(pReader->pData+n, &iDummy); + n += getVarint32(pReader->pData+n, &iDummy); + assert( nnData ); + } + } + } + pReader->nElement = n; + assert( pReader->nElement<=pReader->nData ); + } +} +static void dlrInit(DLReader *pReader, DocListType iType, + const char *pData, int nData){ + assert( pData!=NULL && nData!=0 ); + pReader->iType = iType; + pReader->pData = pData; + pReader->nData = nData; + pReader->nElement = 0; + pReader->iDocid = 0; + + /* Load the first element's data. There must be a first element. */ + dlrStep(pReader); +} +static void dlrDestroy(DLReader *pReader){ + SCRAMBLE(pReader); +} + +#ifndef NDEBUG +/* Verify that the doclist can be validly decoded. Also returns the +** last docid found because it's convenient in other assertions for +** DLWriter. 
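+** As a concrete illustration of what "validly decoded" means here, a
+** DL_POSITIONS doclist for docid 42 with positions 0 and 3 in column
+** 0, followed by docid 50 with position 1 in column 2, is the flat
+** varint run
+**
+**   42 2 5 0   8 1 2 3 0
+**
+** (docid deltas, positions offset by POS_BASE, POS_COLUMN before a
+** column change, POS_END closing each per-document position list).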
+*/ +static void docListValidate(DocListType iType, const char *pData, int nData, + sqlite_int64 *pLastDocid){ + sqlite_int64 iPrevDocid = 0; + assert( nData>0 ); + assert( pData!=0 ); + assert( pData+nData>pData ); + while( nData!=0 ){ + sqlite_int64 iDocidDelta; + int n = getVarint(pData, &iDocidDelta); + iPrevDocid += iDocidDelta; + if( iType>DL_DOCIDS ){ + int iDummy; + while( 1 ){ + n += getVarint32(pData+n, &iDummy); + if( iDummy==POS_END ) break; + if( iDummy==POS_COLUMN ){ + n += getVarint32(pData+n, &iDummy); + }else if( iType>DL_POSITIONS ){ + n += getVarint32(pData+n, &iDummy); + n += getVarint32(pData+n, &iDummy); + } + assert( n<=nData ); + } + } + assert( n<=nData ); + pData += n; + nData -= n; + } + if( pLastDocid ) *pLastDocid = iPrevDocid; +} +#define ASSERT_VALID_DOCLIST(i, p, n, o) docListValidate(i, p, n, o) +#else +#define ASSERT_VALID_DOCLIST(i, p, n, o) assert( 1 ) +#endif + +/*******************************************************************/ +/* DLWriter is used to write doclist data to a DataBuffer. DLWriter +** always appends to the buffer and does not own it. +** +** dlwInit - initialize to write a given type doclistto a buffer. +** dlwDestroy - clear the writer's memory. Does not free buffer. +** dlwAppend - append raw doclist data to buffer. +** dlwCopy - copy next doclist from reader to writer. +** dlwAdd - construct doclist element and append to buffer. +** Only apply dlwAdd() to DL_DOCIDS doclists (else use PLWriter). +*/ +typedef struct DLWriter { + DocListType iType; + DataBuffer *b; + sqlite_int64 iPrevDocid; +#ifndef NDEBUG + int has_iPrevDocid; +#endif +} DLWriter; + +static void dlwInit(DLWriter *pWriter, DocListType iType, DataBuffer *b){ + pWriter->b = b; + pWriter->iType = iType; + pWriter->iPrevDocid = 0; +#ifndef NDEBUG + pWriter->has_iPrevDocid = 0; +#endif +} +static void dlwDestroy(DLWriter *pWriter){ + SCRAMBLE(pWriter); +} +/* iFirstDocid is the first docid in the doclist in pData. It is +** needed because pData may point within a larger doclist, in which +** case the first item would be delta-encoded. +** +** iLastDocid is the final docid in the doclist in pData. It is +** needed to create the new iPrevDocid for future delta-encoding. The +** code could decode the passed doclist to recreate iLastDocid, but +** the only current user (docListMerge) already has decoded this +** information. +*/ +/* TODO(shess) This has become just a helper for docListMerge. +** Consider a refactor to make this cleaner. +*/ +static void dlwAppend(DLWriter *pWriter, + const char *pData, int nData, + sqlite_int64 iFirstDocid, sqlite_int64 iLastDocid){ + sqlite_int64 iDocid = 0; + char c[VARINT_MAX]; + int nFirstOld, nFirstNew; /* Old and new varint len of first docid. */ +#ifndef NDEBUG + sqlite_int64 iLastDocidDelta; +#endif + + /* Recode the initial docid as delta from iPrevDocid. */ + nFirstOld = getVarint(pData, &iDocid); + assert( nFirstOldiType==DL_DOCIDS) ); + nFirstNew = putVarint(c, iFirstDocid-pWriter->iPrevDocid); + + /* Verify that the incoming doclist is valid AND that it ends with + ** the expected docid. This is essential because we'll trust this + ** docid in future delta-encoding. + */ + ASSERT_VALID_DOCLIST(pWriter->iType, pData, nData, &iLastDocidDelta); + assert( iLastDocid==iFirstDocid-iDocid+iLastDocidDelta ); + + /* Append recoded initial docid and everything else. Rest of docids + ** should have been delta-encoded from previous initial docid. 
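+  ** For example, appending a doclist whose docids are 7 and 9 (stored
+  ** as 7 followed by the delta 2) to a writer whose iPrevDocid is 5
+  ** rewrites only that leading varint: the 7 becomes a delta of 2,
+  ** while the trailing delta for docid 9 is copied through untouched.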
+ */ + if( nFirstOldb, c, nFirstNew, + pData+nFirstOld, nData-nFirstOld); + }else{ + dataBufferAppend(pWriter->b, c, nFirstNew); + } + pWriter->iPrevDocid = iLastDocid; +} +static void dlwCopy(DLWriter *pWriter, DLReader *pReader){ + dlwAppend(pWriter, dlrDocData(pReader), dlrDocDataBytes(pReader), + dlrDocid(pReader), dlrDocid(pReader)); +} +static void dlwAdd(DLWriter *pWriter, sqlite_int64 iDocid){ + char c[VARINT_MAX]; + int n = putVarint(c, iDocid-pWriter->iPrevDocid); + + /* Docids must ascend. */ + assert( !pWriter->has_iPrevDocid || iDocid>pWriter->iPrevDocid ); + assert( pWriter->iType==DL_DOCIDS ); + + dataBufferAppend(pWriter->b, c, n); + pWriter->iPrevDocid = iDocid; +#ifndef NDEBUG + pWriter->has_iPrevDocid = 1; +#endif +} + +/*******************************************************************/ +/* PLReader is used to read data from a document's position list. As +** the caller steps through the list, data is cached so that varints +** only need to be decoded once. +** +** plrInit, plrDestroy - create/destroy a reader. +** plrColumn, plrPosition, plrStartOffset, plrEndOffset - accessors +** plrAtEnd - at end of stream, only call plrDestroy once true. +** plrStep - step to the next element. +*/ +typedef struct PLReader { + /* These refer to the next position's data. nData will reach 0 when + ** reading the last position, so plrStep() signals EOF by setting + ** pData to NULL. + */ + const char *pData; + int nData; + + DocListType iType; + int iColumn; /* the last column read */ + int iPosition; /* the last position read */ + int iStartOffset; /* the last start offset read */ + int iEndOffset; /* the last end offset read */ +} PLReader; + +static int plrAtEnd(PLReader *pReader){ + return pReader->pData==NULL; +} +static int plrColumn(PLReader *pReader){ + assert( !plrAtEnd(pReader) ); + return pReader->iColumn; +} +static int plrPosition(PLReader *pReader){ + assert( !plrAtEnd(pReader) ); + return pReader->iPosition; +} +static int plrStartOffset(PLReader *pReader){ + assert( !plrAtEnd(pReader) ); + return pReader->iStartOffset; +} +static int plrEndOffset(PLReader *pReader){ + assert( !plrAtEnd(pReader) ); + return pReader->iEndOffset; +} +static void plrStep(PLReader *pReader){ + int i, n; + + assert( !plrAtEnd(pReader) ); + + if( pReader->nData==0 ){ + pReader->pData = NULL; + return; + } + + n = getVarint32(pReader->pData, &i); + if( i==POS_COLUMN ){ + n += getVarint32(pReader->pData+n, &pReader->iColumn); + pReader->iPosition = 0; + pReader->iStartOffset = 0; + n += getVarint32(pReader->pData+n, &i); + } + /* Should never see adjacent column changes. 
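+  ** (PLWriter only emits POS_COLUMN when the column actually changes,
+  ** and always follows it with at least one position in the same
+  ** plwAdd() call, so well-formed data can never contain two adjacent
+  ** POS_COLUMN markers.)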
*/ + assert( i!=POS_COLUMN ); + + if( i==POS_END ){ + pReader->nData = 0; + pReader->pData = NULL; + return; + } + + pReader->iPosition += i-POS_BASE; + if( pReader->iType==DL_POSITIONS_OFFSETS ){ + n += getVarint32(pReader->pData+n, &i); + pReader->iStartOffset += i; + n += getVarint32(pReader->pData+n, &i); + pReader->iEndOffset = pReader->iStartOffset+i; + } + assert( n<=pReader->nData ); + pReader->pData += n; + pReader->nData -= n; +} + +static void plrInit(PLReader *pReader, DLReader *pDLReader){ + pReader->pData = dlrPosData(pDLReader); + pReader->nData = dlrPosDataLen(pDLReader); + pReader->iType = pDLReader->iType; + pReader->iColumn = 0; + pReader->iPosition = 0; + pReader->iStartOffset = 0; + pReader->iEndOffset = 0; + plrStep(pReader); +} +static void plrDestroy(PLReader *pReader){ + SCRAMBLE(pReader); +} + +/*******************************************************************/ +/* PLWriter is used in constructing a document's position list. As a +** convenience, if iType is DL_DOCIDS, PLWriter becomes a no-op. +** PLWriter writes to the associated DLWriter's buffer. +** +** plwInit - init for writing a document's poslist. +** plwDestroy - clear a writer. +** plwAdd - append position and offset information. +** plwCopy - copy next position's data from reader to writer. +** plwTerminate - add any necessary doclist terminator. +** +** Calling plwAdd() after plwTerminate() may result in a corrupt +** doclist. +*/ +/* TODO(shess) Until we've written the second item, we can cache the +** first item's information. Then we'd have three states: +** +** - initialized with docid, no positions. +** - docid and one position. +** - docid and multiple positions. +** +** Only the last state needs to actually write to dlw->b, which would +** be an improvement in the DLCollector case. +*/ +typedef struct PLWriter { + DLWriter *dlw; + + int iColumn; /* the last column written */ + int iPos; /* the last position written */ + int iOffset; /* the last start offset written */ +} PLWriter; + +/* TODO(shess) In the case where the parent is reading these values +** from a PLReader, we could optimize to a copy if that PLReader has +** the same type as pWriter. +*/ +static void plwAdd(PLWriter *pWriter, int iColumn, int iPos, + int iStartOffset, int iEndOffset){ + /* Worst-case space for POS_COLUMN, iColumn, iPosDelta, + ** iStartOffsetDelta, and iEndOffsetDelta. + */ + char c[5*VARINT_MAX]; + int n = 0; + + /* Ban plwAdd() after plwTerminate(). */ + assert( pWriter->iPos!=-1 ); + + if( pWriter->dlw->iType==DL_DOCIDS ) return; + + if( iColumn!=pWriter->iColumn ){ + n += putVarint(c+n, POS_COLUMN); + n += putVarint(c+n, iColumn); + pWriter->iColumn = iColumn; + pWriter->iPos = 0; + pWriter->iOffset = 0; + } + assert( iPos>=pWriter->iPos ); + n += putVarint(c+n, POS_BASE+(iPos-pWriter->iPos)); + pWriter->iPos = iPos; + if( pWriter->dlw->iType==DL_POSITIONS_OFFSETS ){ + assert( iStartOffset>=pWriter->iOffset ); + n += putVarint(c+n, iStartOffset-pWriter->iOffset); + pWriter->iOffset = iStartOffset; + assert( iEndOffset>=iStartOffset ); + n += putVarint(c+n, iEndOffset-iStartOffset); + } + dataBufferAppend(pWriter->dlw->b, c, n); +} +static void plwCopy(PLWriter *pWriter, PLReader *pReader){ + plwAdd(pWriter, plrColumn(pReader), plrPosition(pReader), + plrStartOffset(pReader), plrEndOffset(pReader)); +} +static void plwInit(PLWriter *pWriter, DLWriter *dlw, sqlite_int64 iDocid){ + char c[VARINT_MAX]; + int n; + + pWriter->dlw = dlw; + + /* Docids must ascend. 
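+  ** (Docids are stored as deltas against dlw->iPrevDocid, and the
+  ** readers and merge logic assume docids strictly increase within a
+  ** doclist, so writing them out of order would violate the
+  ** invariants that DLReader and docListMerge() rely on.)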
*/ + assert( !pWriter->dlw->has_iPrevDocid || iDocid>pWriter->dlw->iPrevDocid ); + n = putVarint(c, iDocid-pWriter->dlw->iPrevDocid); + dataBufferAppend(pWriter->dlw->b, c, n); + pWriter->dlw->iPrevDocid = iDocid; +#ifndef NDEBUG + pWriter->dlw->has_iPrevDocid = 1; +#endif + + pWriter->iColumn = 0; + pWriter->iPos = 0; + pWriter->iOffset = 0; +} +/* TODO(shess) Should plwDestroy() also terminate the doclist? But +** then plwDestroy() would no longer be just a destructor, it would +** also be doing work, which isn't consistent with the overall idiom. +** Another option would be for plwAdd() to always append any necessary +** terminator, so that the output is always correct. But that would +** add incremental work to the common case with the only benefit being +** API elegance. Punt for now. +*/ +static void plwTerminate(PLWriter *pWriter){ + if( pWriter->dlw->iType>DL_DOCIDS ){ + char c[VARINT_MAX]; + int n = putVarint(c, POS_END); + dataBufferAppend(pWriter->dlw->b, c, n); + } +#ifndef NDEBUG + /* Mark as terminated for assert in plwAdd(). */ + pWriter->iPos = -1; +#endif +} +static void plwDestroy(PLWriter *pWriter){ + SCRAMBLE(pWriter); +} + +/*******************************************************************/ +/* DLCollector wraps PLWriter and DLWriter to provide a +** dynamically-allocated doclist area to use during tokenization. +** +** dlcNew - malloc up and initialize a collector. +** dlcDelete - destroy a collector and all contained items. +** dlcAddPos - append position and offset information. +** dlcAddDoclist - add the collected doclist to the given buffer. +** dlcNext - terminate the current document and open another. +*/ +typedef struct DLCollector { + DataBuffer b; + DLWriter dlw; + PLWriter plw; +} DLCollector; + +/* TODO(shess) This could also be done by calling plwTerminate() and +** dataBufferAppend(). I tried that, expecting nominal performance +** differences, but it seemed to pretty reliably be worth 1% to code +** it this way. I suspect it's the incremental malloc overhead (some +** percentage of the plwTerminate() calls will cause a realloc), so +** this might be worth revisiting if the DataBuffer implementation +** changes. +*/ +static void dlcAddDoclist(DLCollector *pCollector, DataBuffer *b){ + if( pCollector->dlw.iType>DL_DOCIDS ){ + char c[VARINT_MAX]; + int n = putVarint(c, POS_END); + dataBufferAppend2(b, pCollector->b.pData, pCollector->b.nData, c, n); + }else{ + dataBufferAppend(b, pCollector->b.pData, pCollector->b.nData); + } +} +static void dlcNext(DLCollector *pCollector, sqlite_int64 iDocid){ + plwTerminate(&pCollector->plw); + plwDestroy(&pCollector->plw); + plwInit(&pCollector->plw, &pCollector->dlw, iDocid); +} +static void dlcAddPos(DLCollector *pCollector, int iColumn, int iPos, + int iStartOffset, int iEndOffset){ + plwAdd(&pCollector->plw, iColumn, iPos, iStartOffset, iEndOffset); +} + +static DLCollector *dlcNew(sqlite_int64 iDocid, DocListType iType){ + DLCollector *pCollector = malloc(sizeof(DLCollector)); + dataBufferInit(&pCollector->b, 0); + dlwInit(&pCollector->dlw, iType, &pCollector->b); + plwInit(&pCollector->plw, &pCollector->dlw, iDocid); + return pCollector; +} +static void dlcDelete(DLCollector *pCollector){ + plwDestroy(&pCollector->plw); + dlwDestroy(&pCollector->dlw); + dataBufferDestroy(&pCollector->b); + SCRAMBLE(pCollector); + free(pCollector); +} + + +/* Copy the doclist data of iType in pData/nData into *out, trimming +** unnecessary data as we go. 
Only columns matching iColumn are +** copied, all columns copied if iColumn is -1. Elements with no +** matching columns are dropped. The output is an iOutType doclist. +*/ +/* NOTE(shess) This code is only valid after all doclists are merged. +** If this is run before merges, then doclist items which represent +** deletion will be trimmed, and will thus not effect a deletion +** during the merge. +*/ +static void docListTrim(DocListType iType, const char *pData, int nData, + int iColumn, DocListType iOutType, DataBuffer *out){ + DLReader dlReader; + DLWriter dlWriter; + + assert( iOutType<=iType ); + + dlrInit(&dlReader, iType, pData, nData); + dlwInit(&dlWriter, iOutType, out); + + while( !dlrAtEnd(&dlReader) ){ + PLReader plReader; + PLWriter plWriter; + int match = 0; + + plrInit(&plReader, &dlReader); + + while( !plrAtEnd(&plReader) ){ + if( iColumn==-1 || plrColumn(&plReader)==iColumn ){ + if( !match ){ + plwInit(&plWriter, &dlWriter, dlrDocid(&dlReader)); + match = 1; + } + plwAdd(&plWriter, plrColumn(&plReader), plrPosition(&plReader), + plrStartOffset(&plReader), plrEndOffset(&plReader)); + } + plrStep(&plReader); + } + if( match ){ + plwTerminate(&plWriter); + plwDestroy(&plWriter); + } + + plrDestroy(&plReader); + dlrStep(&dlReader); + } + dlwDestroy(&dlWriter); + dlrDestroy(&dlReader); +} + +/* Used by docListMerge() to keep doclists in the ascending order by +** docid, then ascending order by age (so the newest comes first). +*/ +typedef struct OrderedDLReader { + DLReader *pReader; + + /* TODO(shess) If we assume that docListMerge pReaders is ordered by + ** age (which we do), then we could use pReader comparisons to break + ** ties. + */ + int idx; +} OrderedDLReader; + +/* Order eof to end, then by docid asc, idx desc. */ +static int orderedDLReaderCmp(OrderedDLReader *r1, OrderedDLReader *r2){ + if( dlrAtEnd(r1->pReader) ){ + if( dlrAtEnd(r2->pReader) ) return 0; /* Both atEnd(). */ + return 1; /* Only r1 atEnd(). */ + } + if( dlrAtEnd(r2->pReader) ) return -1; /* Only r2 atEnd(). */ + + if( dlrDocid(r1->pReader)pReader) ) return -1; + if( dlrDocid(r1->pReader)>dlrDocid(r2->pReader) ) return 1; + + /* Descending on idx. */ + return r2->idx-r1->idx; +} + +/* Bubble p[0] to appropriate place in p[1..n-1]. Assumes that +** p[1..n-1] is already sorted. +*/ +/* TODO(shess) Is this frequent enough to warrant a binary search? +** Before implementing that, instrument the code to check. In most +** current usage, I expect that p[0] will be less than p[1] a very +** high proportion of the time. +*/ +static void orderedDLReaderReorder(OrderedDLReader *p, int n){ + while( n>1 && orderedDLReaderCmp(p, p+1)>0 ){ + OrderedDLReader tmp = p[0]; + p[0] = p[1]; + p[1] = tmp; + n--; + p++; + } +} + +/* Given an array of doclist readers, merge their doclist elements +** into out in sorted order (by docid), dropping elements from older +** readers when there is a duplicate docid. pReaders is assumed to be +** ordered by age, oldest first. +*/ +/* TODO(shess) nReaders must be <= MERGE_COUNT. This should probably +** be fixed. 
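+**
+** As an illustration of the intended semantics: merging an older
+** reader holding docids {1, 3} with a newer reader holding docids
+** {3, 4} yields {1, 3, 4}, with docid 3's data taken from the newer
+** reader (higher idx) and the older element silently dropped.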
+*/ +static void docListMerge(DataBuffer *out, + DLReader *pReaders, int nReaders){ + OrderedDLReader readers[MERGE_COUNT]; + DLWriter writer; + int i, n; + const char *pStart = 0; + int nStart = 0; + sqlite_int64 iFirstDocid = 0, iLastDocid = 0; + + assert( nReaders>0 ); + if( nReaders==1 ){ + dataBufferAppend(out, dlrDocData(pReaders), dlrAllDataBytes(pReaders)); + return; + } + + assert( nReaders<=MERGE_COUNT ); + n = 0; + for(i=0; i0 ){ + orderedDLReaderReorder(readers+i, nReaders-i); + } + + dlwInit(&writer, pReaders[0].iType, out); + while( !dlrAtEnd(readers[0].pReader) ){ + sqlite_int64 iDocid = dlrDocid(readers[0].pReader); + + /* If this is a continuation of the current buffer to copy, extend + ** that buffer. memcpy() seems to be more efficient if it has a + ** lots of data to copy. + */ + if( dlrDocData(readers[0].pReader)==pStart+nStart ){ + nStart += dlrDocDataBytes(readers[0].pReader); + }else{ + if( pStart!=0 ){ + dlwAppend(&writer, pStart, nStart, iFirstDocid, iLastDocid); + } + pStart = dlrDocData(readers[0].pReader); + nStart = dlrDocDataBytes(readers[0].pReader); + iFirstDocid = iDocid; + } + iLastDocid = iDocid; + dlrStep(readers[0].pReader); + + /* Drop all of the older elements with the same docid. */ + for(i=1; i0 ){ + orderedDLReaderReorder(readers+i, nReaders-i); + } + } + + /* Copy over any remaining elements. */ + if( nStart>0 ) dlwAppend(&writer, pStart, nStart, iFirstDocid, iLastDocid); + dlwDestroy(&writer); +} + +/* Helper function for posListUnion(). Compares the current position +** between left and right, returning as standard C idiom of <0 if +** left0 if left>right, and 0 if left==right. "End" always +** compares greater. +*/ +static int posListCmp(PLReader *pLeft, PLReader *pRight){ + assert( pLeft->iType==pRight->iType ); + if( pLeft->iType==DL_DOCIDS ) return 0; + + if( plrAtEnd(pLeft) ) return plrAtEnd(pRight) ? 0 : 1; + if( plrAtEnd(pRight) ) return -1; + + if( plrColumn(pLeft)plrColumn(pRight) ) return 1; + + if( plrPosition(pLeft)plrPosition(pRight) ) return 1; + if( pLeft->iType==DL_POSITIONS ) return 0; + + if( plrStartOffset(pLeft)plrStartOffset(pRight) ) return 1; + + if( plrEndOffset(pLeft)plrEndOffset(pRight) ) return 1; + + return 0; +} + +/* Write the union of position lists in pLeft and pRight to pOut. +** "Union" in this case meaning "All unique position tuples". Should +** work with any doclist type, though both inputs and the output +** should be the same type. +*/ +static void posListUnion(DLReader *pLeft, DLReader *pRight, DLWriter *pOut){ + PLReader left, right; + PLWriter writer; + + assert( dlrDocid(pLeft)==dlrDocid(pRight) ); + assert( pLeft->iType==pRight->iType ); + assert( pLeft->iType==pOut->iType ); + + plrInit(&left, pLeft); + plrInit(&right, pRight); + plwInit(&writer, pOut, dlrDocid(pLeft)); + + while( !plrAtEnd(&left) || !plrAtEnd(&right) ){ + int c = posListCmp(&left, &right); + if( c<0 ){ + plwCopy(&writer, &left); + plrStep(&left); + }else if( c>0 ){ + plwCopy(&writer, &right); + plrStep(&right); + }else{ + plwCopy(&writer, &left); + plrStep(&left); + plrStep(&right); + } + } + + plwTerminate(&writer); + plwDestroy(&writer); + plrDestroy(&left); + plrDestroy(&right); +} + +/* Write the union of doclists in pLeft and pRight to pOut. For +** docids in common between the inputs, the union of the position +** lists is written. Inputs and outputs are always type DL_DEFAULT. 
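+** For example, the union of a doclist covering docids {1, 5} with one
+** covering {5, 9} contains docids {1, 5, 9}; for docid 5 the two
+** position lists are combined by posListUnion() so no positions are
+** lost.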
+*/ +static void docListUnion( + const char *pLeft, int nLeft, + const char *pRight, int nRight, + DataBuffer *pOut /* Write the combined doclist here */ +){ + DLReader left, right; + DLWriter writer; + + if( nLeft==0 ){ + dataBufferAppend(pOut, pRight, nRight); + return; + } + if( nRight==0 ){ + dataBufferAppend(pOut, pLeft, nLeft); + return; + } + + dlrInit(&left, DL_DEFAULT, pLeft, nLeft); + dlrInit(&right, DL_DEFAULT, pRight, nRight); + dlwInit(&writer, DL_DEFAULT, pOut); + + while( !dlrAtEnd(&left) || !dlrAtEnd(&right) ){ + if( dlrAtEnd(&right) ){ + dlwCopy(&writer, &left); + dlrStep(&left); + }else if( dlrAtEnd(&left) ){ + dlwCopy(&writer, &right); + dlrStep(&right); + }else if( dlrDocid(&left)dlrDocid(&right) ){ + dlwCopy(&writer, &right); + dlrStep(&right); + }else{ + posListUnion(&left, &right, &writer); + dlrStep(&left); + dlrStep(&right); + } + } + + dlrDestroy(&left); + dlrDestroy(&right); + dlwDestroy(&writer); +} + +/* pLeft and pRight are DLReaders positioned to the same docid. +** +** If there are no instances in pLeft or pRight where the position +** of pLeft is one less than the position of pRight, then this +** routine adds nothing to pOut. +** +** If there are one or more instances where positions from pLeft +** are exactly one less than positions from pRight, then add a new +** document record to pOut. If pOut wants to hold positions, then +** include the positions from pRight that are one more than a +** position in pLeft. In other words: pRight.iPos==pLeft.iPos+1. +*/ +static void posListPhraseMerge(DLReader *pLeft, DLReader *pRight, + DLWriter *pOut){ + PLReader left, right; + PLWriter writer; + int match = 0; + + assert( dlrDocid(pLeft)==dlrDocid(pRight) ); + assert( pOut->iType!=DL_POSITIONS_OFFSETS ); + + plrInit(&left, pLeft); + plrInit(&right, pRight); + + while( !plrAtEnd(&left) && !plrAtEnd(&right) ){ + if( plrColumn(&left)plrColumn(&right) ){ + plrStep(&right); + }else if( plrPosition(&left)+1plrPosition(&right) ){ + plrStep(&right); + }else{ + if( !match ){ + plwInit(&writer, pOut, dlrDocid(pLeft)); + match = 1; + } + plwAdd(&writer, plrColumn(&right), plrPosition(&right), 0, 0); + plrStep(&left); + plrStep(&right); + } + } + + if( match ){ + plwTerminate(&writer); + plwDestroy(&writer); + } + + plrDestroy(&left); + plrDestroy(&right); +} + +/* We have two doclists with positions: pLeft and pRight. +** Write the phrase intersection of these two doclists into pOut. +** +** A phrase intersection means that two documents only match +** if pLeft.iPos+1==pRight.iPos. +** +** iType controls the type of data written to pOut. If iType is +** DL_POSITIONS, the positions are those from pRight. +*/ +static void docListPhraseMerge( + const char *pLeft, int nLeft, + const char *pRight, int nRight, + DocListType iType, + DataBuffer *pOut /* Write the combined doclist here */ +){ + DLReader left, right; + DLWriter writer; + + if( nLeft==0 || nRight==0 ) return; + + assert( iType!=DL_POSITIONS_OFFSETS ); + + dlrInit(&left, DL_POSITIONS, pLeft, nLeft); + dlrInit(&right, DL_POSITIONS, pRight, nRight); + dlwInit(&writer, iType, pOut); + + while( !dlrAtEnd(&left) && !dlrAtEnd(&right) ){ + if( dlrDocid(&left) one AND (two OR three) + * [one OR two three] ==> (one OR two) AND three + * + * A "-" before a term matches all entries that lack that term. + * The "-" must occur immediately before the term with in intervening + * space. This is how the search engines do it. + * + * A NOT term cannot be the right-hand operand of an OR. 
If this + * occurs in the query string, the NOT is ignored: + * + * [one OR -two] ==> one OR two + * + */ +typedef struct Query { + fulltext_vtab *pFts; /* The full text index */ + int nTerms; /* Number of terms in the query */ + QueryTerm *pTerms; /* Array of terms. Space obtained from malloc() */ + int nextIsOr; /* Set the isOr flag on the next inserted term */ + int nextColumn; /* Next word parsed must be in this column */ + int dfltColumn; /* The default column */ +} Query; + + +/* +** An instance of the following structure keeps track of generated +** matching-word offset information and snippets. +*/ +typedef struct Snippet { + int nMatch; /* Total number of matches */ + int nAlloc; /* Space allocated for aMatch[] */ + struct snippetMatch { /* One entry for each matching term */ + char snStatus; /* Status flag for use while constructing snippets */ + short int iCol; /* The column that contains the match */ + short int iTerm; /* The index in Query.pTerms[] of the matching term */ + short int nByte; /* Number of bytes in the term */ + int iStart; /* The offset to the first character of the term */ + } *aMatch; /* Points to space obtained from malloc */ + char *zOffset; /* Text rendering of aMatch[] */ + int nOffset; /* strlen(zOffset) */ + char *zSnippet; /* Snippet text */ + int nSnippet; /* strlen(zSnippet) */ +} Snippet; + + +typedef enum QueryType { + QUERY_GENERIC, /* table scan */ + QUERY_ROWID, /* lookup by rowid */ + QUERY_FULLTEXT /* QUERY_FULLTEXT + [i] is a full-text search for column i*/ +} QueryType; + +typedef enum fulltext_statement { + CONTENT_INSERT_STMT, + CONTENT_SELECT_STMT, + CONTENT_UPDATE_STMT, + CONTENT_DELETE_STMT, + + BLOCK_INSERT_STMT, + BLOCK_SELECT_STMT, + BLOCK_DELETE_STMT, + + SEGDIR_MAX_INDEX_STMT, + SEGDIR_SET_STMT, + SEGDIR_SELECT_STMT, + SEGDIR_SPAN_STMT, + SEGDIR_DELETE_STMT, + SEGDIR_SELECT_ALL_STMT, + + MAX_STMT /* Always at end! */ +} fulltext_statement; + +/* These must exactly match the enum above. */ +/* TODO(shess): Is there some risk that a statement will be used in two +** cursors at once, e.g. if a query joins a virtual table to itself? +** If so perhaps we should move some of these to the cursor object. +*/ +static const char *const fulltext_zStatement[MAX_STMT] = { + /* CONTENT_INSERT */ NULL, /* generated in contentInsertStatement() */ + /* CONTENT_SELECT */ "select * from %_content where rowid = ?", + /* CONTENT_UPDATE */ NULL, /* generated in contentUpdateStatement() */ + /* CONTENT_DELETE */ "delete from %_content where rowid = ?", + + /* BLOCK_INSERT */ "insert into %_segments values (?)", + /* BLOCK_SELECT */ "select block from %_segments where rowid = ?", + /* BLOCK_DELETE */ "delete from %_segments where rowid between ? and ?", + + /* SEGDIR_MAX_INDEX */ "select max(idx) from %_segdir where level = ?", + /* SEGDIR_SET */ "insert into %_segdir values (?, ?, ?, ?, ?, ?)", + /* SEGDIR_SELECT */ + "select start_block, leaves_end_block, root from %_segdir " + " where level = ? order by idx", + /* SEGDIR_SPAN */ + "select min(start_block), max(end_block) from %_segdir " + " where level = ? and start_block <> 0", + /* SEGDIR_DELETE */ "delete from %_segdir where level = ?", + /* SEGDIR_SELECT_ALL */ + "select root, leaves_end_block from %_segdir order by level desc, idx", +}; + +/* +** A connection to a fulltext index is an instance of the following +** structure. The xCreate and xConnect methods create an instance +** of this structure and xDestroy and xDisconnect free that instance. 
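+** (xCreate/xDestroy run when the virtual table is created or dropped
+** with CREATE/DROP VIRTUAL TABLE; xConnect/xDisconnect run each time
+** an existing table is opened or closed by a database connection.)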
+** All other methods receive a pointer to the structure as one of their +** arguments. +*/ +struct fulltext_vtab { + sqlite3_vtab base; /* Base class used by SQLite core */ + sqlite3 *db; /* The database connection */ + const char *zDb; /* logical database name */ + const char *zName; /* virtual table name */ + int nColumn; /* number of columns in virtual table */ + char **azColumn; /* column names. malloced */ + char **azContentColumn; /* column names in content table; malloced */ + sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */ + + /* Precompiled statements which we keep as long as the table is + ** open. + */ + sqlite3_stmt *pFulltextStatements[MAX_STMT]; + + /* Precompiled statements used for segment merges. We run a + ** separate select across the leaf level of each tree being merged. + */ + sqlite3_stmt *pLeafSelectStmts[MERGE_COUNT]; + /* The statement used to prepare pLeafSelectStmts. */ +#define LEAF_SELECT \ + "select block from %_segments where rowid between ? and ? order by rowid" + + /* These buffer pending index updates during transactions. + ** nPendingData estimates the memory size of the pending data. It + ** doesn't include the hash-bucket overhead, nor any malloc + ** overhead. When nPendingData exceeds kPendingThreshold, the + ** buffer is flushed even before the transaction closes. + ** pendingTerms stores the data, and is only valid when nPendingData + ** is >=0 (nPendingData<0 means pendingTerms has not been + ** initialized). iPrevDocid is the last docid written, used to make + ** certain we're inserting in sorted order. + */ + int nPendingData; +#define kPendingThreshold (1*1024*1024) + sqlite_int64 iPrevDocid; + fts2Hash pendingTerms; +}; + +/* +** When the core wants to do a query, it create a cursor using a +** call to xOpen. This structure is an instance of a cursor. It +** is destroyed by xClose. +*/ +typedef struct fulltext_cursor { + sqlite3_vtab_cursor base; /* Base class used by SQLite core */ + QueryType iCursorType; /* Copy of sqlite3_index_info.idxNum */ + sqlite3_stmt *pStmt; /* Prepared statement in use by the cursor */ + int eof; /* True if at End Of Results */ + Query q; /* Parsed query string */ + Snippet snippet; /* Cached snippet for the current row */ + int iColumn; /* Column being searched */ + DataBuffer result; /* Doclist results from fulltextQuery */ + DLReader reader; /* Result reader if result not empty */ +} fulltext_cursor; + +static struct fulltext_vtab *cursor_vtab(fulltext_cursor *c){ + return (fulltext_vtab *) c->base.pVtab; +} + +static const sqlite3_module fts2Module; /* forward declaration */ + +/* Return a dynamically generated statement of the form + * insert into %_content (rowid, ...) values (?, ...) + */ +static const char *contentInsertStatement(fulltext_vtab *v){ + StringBuffer sb; + int i; + + initStringBuffer(&sb); + append(&sb, "insert into %_content (rowid, "); + appendList(&sb, v->nColumn, v->azContentColumn); + append(&sb, ") values (?"); + for(i=0; inColumn; ++i) + append(&sb, ", ?"); + append(&sb, ")"); + return stringBufferData(&sb); +} + +/* Return a dynamically generated statement of the form + * update %_content set [col_0] = ?, [col_1] = ?, ... + * where rowid = ? 
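+ * For the two-column example table in parseSpec() below (subject,
+ * body) the generated statement comes out as something like:
+ *
+ *   update %_content set c0subject = ?, c1body = ? where rowid = ?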
+ */ +static const char *contentUpdateStatement(fulltext_vtab *v){ + StringBuffer sb; + int i; + + initStringBuffer(&sb); + append(&sb, "update %_content set "); + for(i=0; inColumn; ++i) { + if( i>0 ){ + append(&sb, ", "); + } + append(&sb, v->azContentColumn[i]); + append(&sb, " = ?"); + } + append(&sb, " where rowid = ?"); + return stringBufferData(&sb); +} + +/* Puts a freshly-prepared statement determined by iStmt in *ppStmt. +** If the indicated statement has never been prepared, it is prepared +** and cached, otherwise the cached version is reset. +*/ +static int sql_get_statement(fulltext_vtab *v, fulltext_statement iStmt, + sqlite3_stmt **ppStmt){ + assert( iStmtpFulltextStatements[iStmt]==NULL ){ + const char *zStmt; + int rc; + switch( iStmt ){ + case CONTENT_INSERT_STMT: + zStmt = contentInsertStatement(v); break; + case CONTENT_UPDATE_STMT: + zStmt = contentUpdateStatement(v); break; + default: + zStmt = fulltext_zStatement[iStmt]; + } + rc = sql_prepare(v->db, v->zDb, v->zName, &v->pFulltextStatements[iStmt], + zStmt); + if( zStmt != fulltext_zStatement[iStmt]) free((void *) zStmt); + if( rc!=SQLITE_OK ) return rc; + } else { + int rc = sqlite3_reset(v->pFulltextStatements[iStmt]); + if( rc!=SQLITE_OK ) return rc; + } + + *ppStmt = v->pFulltextStatements[iStmt]; + return SQLITE_OK; +} + +/* Like sqlite3_step(), but convert SQLITE_DONE to SQLITE_OK and +** SQLITE_ROW to SQLITE_ERROR. Useful for statements like UPDATE, +** where we expect no results. +*/ +static int sql_single_step(sqlite3_stmt *s){ + int rc = sqlite3_step(s); + return (rc==SQLITE_DONE) ? SQLITE_OK : rc; +} + +/* Like sql_get_statement(), but for special replicated LEAF_SELECT +** statements. +*/ +/* TODO(shess) Write version for generic statements and then share +** that between the cached-statement functions. +*/ +static int sql_get_leaf_statement(fulltext_vtab *v, int idx, + sqlite3_stmt **ppStmt){ + assert( idx>=0 && idxpLeafSelectStmts[idx]==NULL ){ + int rc = sql_prepare(v->db, v->zDb, v->zName, &v->pLeafSelectStmts[idx], + LEAF_SELECT); + if( rc!=SQLITE_OK ) return rc; + }else{ + int rc = sqlite3_reset(v->pLeafSelectStmts[idx]); + if( rc!=SQLITE_OK ) return rc; + } + + *ppStmt = v->pLeafSelectStmts[idx]; + return SQLITE_OK; +} + +/* insert into %_content (rowid, ...) values ([rowid], [pValues]) */ +static int content_insert(fulltext_vtab *v, sqlite3_value *rowid, + sqlite3_value **pValues){ + sqlite3_stmt *s; + int i; + int rc = sql_get_statement(v, CONTENT_INSERT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_value(s, 1, rowid); + if( rc!=SQLITE_OK ) return rc; + + for(i=0; inColumn; ++i){ + rc = sqlite3_bind_value(s, 2+i, pValues[i]); + if( rc!=SQLITE_OK ) return rc; + } + + return sql_single_step(s); +} + +/* update %_content set col0 = pValues[0], col1 = pValues[1], ... 
+ * where rowid = [iRowid] */ +static int content_update(fulltext_vtab *v, sqlite3_value **pValues, + sqlite_int64 iRowid){ + sqlite3_stmt *s; + int i; + int rc = sql_get_statement(v, CONTENT_UPDATE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + for(i=0; inColumn; ++i){ + rc = sqlite3_bind_value(s, 1+i, pValues[i]); + if( rc!=SQLITE_OK ) return rc; + } + + rc = sqlite3_bind_int64(s, 1+v->nColumn, iRowid); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +static void freeStringArray(int nString, const char **pString){ + int i; + + for (i=0 ; i < nString ; ++i) { + if( pString[i]!=NULL ) free((void *) pString[i]); + } + free((void *) pString); +} + +/* select * from %_content where rowid = [iRow] + * The caller must delete the returned array and all strings in it. + * null fields will be NULL in the returned array. + * + * TODO: Perhaps we should return pointer/length strings here for consistency + * with other code which uses pointer/length. */ +static int content_select(fulltext_vtab *v, sqlite_int64 iRow, + const char ***pValues){ + sqlite3_stmt *s; + const char **values; + int i; + int rc; + + *pValues = NULL; + + rc = sql_get_statement(v, CONTENT_SELECT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iRow); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc!=SQLITE_ROW ) return rc; + + values = (const char **) malloc(v->nColumn * sizeof(const char *)); + for(i=0; inColumn; ++i){ + if( sqlite3_column_type(s, i)==SQLITE_NULL ){ + values[i] = NULL; + }else{ + values[i] = string_dup((char*)sqlite3_column_text(s, i)); + } + } + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ){ + *pValues = values; + return SQLITE_OK; + } + + freeStringArray(v->nColumn, values); + return rc; +} + +/* delete from %_content where rowid = [iRow ] */ +static int content_delete(fulltext_vtab *v, sqlite_int64 iRow){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, CONTENT_DELETE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iRow); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* insert into %_segments values ([pData]) +** returns assigned rowid in *piBlockid +*/ +static int block_insert(fulltext_vtab *v, const char *pData, int nData, + sqlite_int64 *piBlockid){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, BLOCK_INSERT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_blob(s, 1, pData, nData, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + if( rc!=SQLITE_DONE ) return rc; + + *piBlockid = sqlite3_last_insert_rowid(v->db); + return SQLITE_OK; +} + +/* delete from %_segments +** where rowid between [iStartBlockid] and [iEndBlockid] +** +** Deletes the range of blocks, inclusive, used to delete the blocks +** which form a segment. +*/ +static int block_delete(fulltext_vtab *v, + sqlite_int64 iStartBlockid, sqlite_int64 iEndBlockid){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, BLOCK_DELETE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iStartBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 2, iEndBlockid); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* Returns SQLITE_ROW with *pidx set to the maximum segment idx found +** at iLevel. 
Returns SQLITE_DONE if there are no segments at +** iLevel. Otherwise returns an error. +*/ +static int segdir_max_index(fulltext_vtab *v, int iLevel, int *pidx){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_MAX_INDEX_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + /* Should always get at least one row due to how max() works. */ + if( rc==SQLITE_DONE ) return SQLITE_DONE; + if( rc!=SQLITE_ROW ) return rc; + + /* NULL means that there were no inputs to max(). */ + if( SQLITE_NULL==sqlite3_column_type(s, 0) ){ + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + return rc; + } + + *pidx = sqlite3_column_int(s, 0); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + if( rc!=SQLITE_DONE ) return rc; + return SQLITE_ROW; +} + +/* insert into %_segdir values ( +** [iLevel], [idx], +** [iStartBlockid], [iLeavesEndBlockid], [iEndBlockid], +** [pRootData] +** ) +*/ +static int segdir_set(fulltext_vtab *v, int iLevel, int idx, + sqlite_int64 iStartBlockid, + sqlite_int64 iLeavesEndBlockid, + sqlite_int64 iEndBlockid, + const char *pRootData, int nRootData){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_SET_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 2, idx); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 3, iStartBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 4, iLeavesEndBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 5, iEndBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_blob(s, 6, pRootData, nRootData, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* Queries %_segdir for the block span of the segments in level +** iLevel. Returns SQLITE_DONE if there are no blocks for iLevel, +** SQLITE_ROW if there are blocks, else an error. +*/ +static int segdir_span(fulltext_vtab *v, int iLevel, + sqlite_int64 *piStartBlockid, + sqlite_int64 *piEndBlockid){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_SPAN_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ) return SQLITE_DONE; /* Should never happen */ + if( rc!=SQLITE_ROW ) return rc; + + /* This happens if all segments at this level are entirely inline. */ + if( SQLITE_NULL==sqlite3_column_type(s, 0) ){ + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + int rc2 = sqlite3_step(s); + if( rc2==SQLITE_ROW ) return SQLITE_ERROR; + return rc2; + } + + *piStartBlockid = sqlite3_column_int64(s, 0); + *piEndBlockid = sqlite3_column_int64(s, 1); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + if( rc!=SQLITE_DONE ) return rc; + return SQLITE_ROW; +} + +/* Delete the segment blocks and segment directory records for all +** segments at iLevel. 
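+** This is the cleanup half of a segment merge (see the header
+** comment): once the segments at iLevel have been folded into a new
+** segment at iLevel+1, their blocks and %_segdir rows are no longer
+** needed and can be reclaimed.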
+*/ +static int segdir_delete(fulltext_vtab *v, int iLevel){ + sqlite3_stmt *s; + sqlite_int64 iStartBlockid, iEndBlockid; + int rc = segdir_span(v, iLevel, &iStartBlockid, &iEndBlockid); + if( rc!=SQLITE_ROW && rc!=SQLITE_DONE ) return rc; + + if( rc==SQLITE_ROW ){ + rc = block_delete(v, iStartBlockid, iEndBlockid); + if( rc!=SQLITE_OK ) return rc; + } + + /* Delete the segment directory itself. */ + rc = sql_get_statement(v, SEGDIR_DELETE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* TODO(shess) clearPendingTerms() is far down the file because +** writeZeroSegment() is far down the file because LeafWriter is far +** down the file. Consider refactoring the code to move the non-vtab +** code above the vtab code so that we don't need this forward +** reference. +*/ +static int clearPendingTerms(fulltext_vtab *v); + +/* +** Free the memory used to contain a fulltext_vtab structure. +*/ +static void fulltext_vtab_destroy(fulltext_vtab *v){ + int iStmt, i; + + TRACE(("FTS2 Destroy %p\n", v)); + for( iStmt=0; iStmtpFulltextStatements[iStmt]!=NULL ){ + sqlite3_finalize(v->pFulltextStatements[iStmt]); + v->pFulltextStatements[iStmt] = NULL; + } + } + + for( i=0; ipLeafSelectStmts[i]!=NULL ){ + sqlite3_finalize(v->pLeafSelectStmts[i]); + v->pLeafSelectStmts[i] = NULL; + } + } + + if( v->pTokenizer!=NULL ){ + v->pTokenizer->pModule->xDestroy(v->pTokenizer); + v->pTokenizer = NULL; + } + + clearPendingTerms(v); + + free(v->azColumn); + for(i = 0; i < v->nColumn; ++i) { + sqlite3_free(v->azContentColumn[i]); + } + free(v->azContentColumn); + free(v); +} + +/* +** Token types for parsing the arguments to xConnect or xCreate. +*/ +#define TOKEN_EOF 0 /* End of file */ +#define TOKEN_SPACE 1 /* Any kind of whitespace */ +#define TOKEN_ID 2 /* An identifier */ +#define TOKEN_STRING 3 /* A string literal */ +#define TOKEN_PUNCT 4 /* A single punctuation character */ + +/* +** If X is a character that can be used in an identifier then +** IdChar(X) will be true. Otherwise it is false. +** +** For ASCII, any character with the high-order bit set is +** allowed in an identifier. For 7-bit characters, +** sqlite3IsIdChar[X] must be 1. +** +** Ticket #1066. the SQL standard does not allow '$' in the +** middle of identfiers. But many SQL implementations do. +** SQLite will allow '$' in identifiers for compatibility. +** But the feature is undocumented. +*/ +static const char isIdChar[] = { +/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ + 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ +}; +#define IdChar(C) (((c=C)&0x80)!=0 || (c>0x1f && isIdChar[c-0x20])) + + +/* +** Return the length of the token that begins at z[0]. +** Store the token type in *tokenType before returning. 
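+** For example, scanning the argument string
+**
+**     tokenize mytokenizer(myarg)
+**
+** yields "tokenize" (TOKEN_ID), a space run (TOKEN_SPACE),
+** "mytokenizer" (TOKEN_ID), "(" (TOKEN_PUNCT), "myarg" (TOKEN_ID),
+** ")" (TOKEN_PUNCT), and finally TOKEN_EOF.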
+*/ +static int getToken(const char *z, int *tokenType){ + int i, c; + switch( *z ){ + case 0: { + *tokenType = TOKEN_EOF; + return 0; + } + case ' ': case '\t': case '\n': case '\f': case '\r': { + for(i=1; safe_isspace(z[i]); i++){} + *tokenType = TOKEN_SPACE; + return i; + } + case '`': + case '\'': + case '"': { + int delim = z[0]; + for(i=1; (c=z[i])!=0; i++){ + if( c==delim ){ + if( z[i+1]==delim ){ + i++; + }else{ + break; + } + } + } + *tokenType = TOKEN_STRING; + return i + (c!=0); + } + case '[': { + for(i=1, c=z[0]; c!=']' && (c=z[i])!=0; i++){} + *tokenType = TOKEN_ID; + return i; + } + default: { + if( !IdChar(*z) ){ + break; + } + for(i=1; IdChar(z[i]); i++){} + *tokenType = TOKEN_ID; + return i; + } + } + *tokenType = TOKEN_PUNCT; + return 1; +} + +/* +** A token extracted from a string is an instance of the following +** structure. +*/ +typedef struct Token { + const char *z; /* Pointer to token text. Not '\000' terminated */ + short int n; /* Length of the token text in bytes. */ +} Token; + +/* +** Given a input string (which is really one of the argv[] parameters +** passed into xConnect or xCreate) split the string up into tokens. +** Return an array of pointers to '\000' terminated strings, one string +** for each non-whitespace token. +** +** The returned array is terminated by a single NULL pointer. +** +** Space to hold the returned array is obtained from a single +** malloc and should be freed by passing the return value to free(). +** The individual strings within the token list are all a part of +** the single memory allocation and will all be freed at once. +*/ +static char **tokenizeString(const char *z, int *pnToken){ + int nToken = 0; + Token *aToken = malloc( strlen(z) * sizeof(aToken[0]) ); + int n = 1; + int e, i; + int totalSize = 0; + char **azToken; + char *zCopy; + while( n>0 ){ + n = getToken(z, &e); + if( e!=TOKEN_SPACE ){ + aToken[nToken].z = z; + aToken[nToken].n = n; + nToken++; + totalSize += n+1; + } + z += n; + } + azToken = (char**)malloc( nToken*sizeof(char*) + totalSize ); + zCopy = (char*)&azToken[nToken]; + nToken--; + for(i=0; i=0 ){ + azIn[j] = azIn[i]; + } + j++; + } + } + azIn[j] = 0; + } +} + + +/* +** Find the first alphanumeric token in the string zIn. Null-terminate +** this token. Remove any quotation marks. And return a pointer to +** the result. +*/ +static char *firstToken(char *zIn, char **pzTail){ + int n, ttype; + while(1){ + n = getToken(zIn, &ttype); + if( ttype==TOKEN_SPACE ){ + zIn += n; + }else if( ttype==TOKEN_EOF ){ + *pzTail = zIn; + return 0; + }else{ + zIn[n] = 0; + *pzTail = &zIn[1]; + dequoteString(zIn); + return zIn; + } + } + /*NOTREACHED*/ +} + +/* Return true if... +** +** * s begins with the string t, ignoring case +** * s is longer than t +** * The first character of s beyond t is not a alphanumeric +** +** Ignore leading space in *s. +** +** To put it another way, return true if the first token of +** s[] is t[]. +*/ +static int startsWith(const char *s, const char *t){ + while( safe_isspace(*s) ){ s++; } + while( *t ){ + if( safe_tolower(*s++)!=safe_tolower(*t++) ) return 0; + } + return *s!='_' && !safe_isalnum(*s); +} + +/* +** An instance of this structure defines the "spec" of a +** full text index. This structure is populated by parseSpec +** and use by fulltextConnect and fulltextCreate. 
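+**
+** As a rough illustration, a declaration such as
+**
+**     CREATE VIRTUAL TABLE email USING fts2(subject, body, tokenize porter);
+**
+** should yield a TableSpec along the lines of zName = "email",
+** nColumn = 2, azColumn = {"subject", "body"},
+** azContentColumn = {"c0subject", "c1body"} (parseSpec below shows how
+** those names are built), and azTokenizer = {"porter", NULL}.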
+*/ +typedef struct TableSpec { + const char *zDb; /* Logical database name */ + const char *zName; /* Name of the full-text index */ + int nColumn; /* Number of columns to be indexed */ + char **azColumn; /* Original names of columns to be indexed */ + char **azContentColumn; /* Column names for %_content */ + char **azTokenizer; /* Name of tokenizer and its arguments */ +} TableSpec; + +/* +** Reclaim all of the memory used by a TableSpec +*/ +static void clearTableSpec(TableSpec *p) { + free(p->azColumn); + free(p->azContentColumn); + free(p->azTokenizer); +} + +/* Parse a CREATE VIRTUAL TABLE statement, which looks like this: + * + * CREATE VIRTUAL TABLE email + * USING fts2(subject, body, tokenize mytokenizer(myarg)) + * + * We return parsed information in a TableSpec structure. + * + */ +static int parseSpec(TableSpec *pSpec, int argc, const char *const*argv, + char**pzErr){ + int i, n; + char *z, *zDummy; + char **azArg; + const char *zTokenizer = 0; /* argv[] entry describing the tokenizer */ + + assert( argc>=3 ); + /* Current interface: + ** argv[0] - module name + ** argv[1] - database name + ** argv[2] - table name + ** argv[3..] - columns, optionally followed by tokenizer specification + ** and snippet delimiters specification. + */ + + /* Make a copy of the complete argv[][] array in a single allocation. + ** The argv[][] array is read-only and transient. We can write to the + ** copy in order to modify things and the copy is persistent. + */ + CLEAR(pSpec); + for(i=n=0; izDb = azArg[1]; + pSpec->zName = azArg[2]; + pSpec->nColumn = 0; + pSpec->azColumn = azArg; + zTokenizer = "tokenize simple"; + for(i=3; inColumn] = firstToken(azArg[i], &zDummy); + pSpec->nColumn++; + } + } + if( pSpec->nColumn==0 ){ + azArg[0] = "content"; + pSpec->nColumn = 1; + } + + /* + ** Construct the list of content column names. + ** + ** Each content column name will be of the form cNNAAAA + ** where NN is the column number and AAAA is the sanitized + ** column name. "sanitized" means that special characters are + ** converted to "_". The cNN prefix guarantees that all column + ** names are unique. + ** + ** The AAAA suffix is not strictly necessary. It is included + ** for the convenience of people who might examine the generated + ** %_content table and wonder what the columns are used for. + */ + pSpec->azContentColumn = malloc( pSpec->nColumn * sizeof(char *) ); + if( pSpec->azContentColumn==0 ){ + clearTableSpec(pSpec); + return SQLITE_NOMEM; + } + for(i=0; inColumn; i++){ + char *p; + pSpec->azContentColumn[i] = sqlite3_mprintf("c%d%s", i, azArg[i]); + for (p = pSpec->azContentColumn[i]; *p ; ++p) { + if( !safe_isalnum(*p) ) *p = '_'; + } + } + + /* + ** Parse the tokenizer specification string. + */ + pSpec->azTokenizer = tokenizeString(zTokenizer, &n); + tokenListToIdList(pSpec->azTokenizer); + + return SQLITE_OK; +} + +/* +** Generate a CREATE TABLE statement that describes the schema of +** the virtual table. Return a pointer to this schema string. +** +** Space is obtained from sqlite3_mprintf() and should be freed +** using sqlite3_free(). 
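+**
+** For the two-column "email" example above, the generated schema is
+** expected to look roughly like
+**
+**     CREATE TABLE x('subject','body','email')
+**
+** where the trailing column is named after the table itself; MATCHing
+** against it searches every column, and fulltextColumn() answers it with
+** a blob holding a pointer to the cursor (see that routine below).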
+*/ +static char *fulltextSchema( + int nColumn, /* Number of columns */ + const char *const* azColumn, /* List of columns */ + const char *zTableName /* Name of the table */ +){ + int i; + char *zSchema, *zNext; + const char *zSep = "("; + zSchema = sqlite3_mprintf("CREATE TABLE x"); + for(i=0; ibase */ + v->db = db; + v->zDb = spec->zDb; /* Freed when azColumn is freed */ + v->zName = spec->zName; /* Freed when azColumn is freed */ + v->nColumn = spec->nColumn; + v->azContentColumn = spec->azContentColumn; + spec->azContentColumn = 0; + v->azColumn = spec->azColumn; + spec->azColumn = 0; + + if( spec->azTokenizer==0 ){ + return SQLITE_NOMEM; + } + + zTok = spec->azTokenizer[0]; + if( !zTok ){ + zTok = "simple"; + } + nTok = strlen(zTok)+1; + + m = (sqlite3_tokenizer_module *)sqlite3Fts2HashFind(pHash, zTok, nTok); + if( !m ){ + *pzErr = sqlite3_mprintf("unknown tokenizer: %s", spec->azTokenizer[0]); + rc = SQLITE_ERROR; + goto err; + } + + for(n=0; spec->azTokenizer[n]; n++){} + if( n ){ + rc = m->xCreate(n-1, (const char*const*)&spec->azTokenizer[1], + &v->pTokenizer); + }else{ + rc = m->xCreate(0, 0, &v->pTokenizer); + } + if( rc!=SQLITE_OK ) goto err; + v->pTokenizer->pModule = m; + + /* TODO: verify the existence of backing tables foo_content, foo_term */ + + schema = fulltextSchema(v->nColumn, (const char*const*)v->azColumn, + spec->zName); + rc = sqlite3_declare_vtab(db, schema); + sqlite3_free(schema); + if( rc!=SQLITE_OK ) goto err; + + memset(v->pFulltextStatements, 0, sizeof(v->pFulltextStatements)); + + /* Indicate that the buffer is not live. */ + v->nPendingData = -1; + + *ppVTab = &v->base; + TRACE(("FTS2 Connect %p\n", v)); + + return rc; + +err: + fulltext_vtab_destroy(v); + return rc; +} + +static int fulltextConnect( + sqlite3 *db, + void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVTab, + char **pzErr +){ + TableSpec spec; + int rc = parseSpec(&spec, argc, argv, pzErr); + if( rc!=SQLITE_OK ) return rc; + + rc = constructVtab(db, (fts2Hash *)pAux, &spec, ppVTab, pzErr); + clearTableSpec(&spec); + return rc; +} + +/* The %_content table holds the text of each document, with +** the rowid used as the docid. +*/ +/* TODO(shess) This comment needs elaboration to match the updated +** code. Work it into the top-of-file comment at that time. +*/ +static int fulltextCreate(sqlite3 *db, void *pAux, + int argc, const char * const *argv, + sqlite3_vtab **ppVTab, char **pzErr){ + int rc; + TableSpec spec; + StringBuffer schema; + TRACE(("FTS2 Create\n")); + + rc = parseSpec(&spec, argc, argv, pzErr); + if( rc!=SQLITE_OK ) return rc; + + initStringBuffer(&schema); + append(&schema, "CREATE TABLE %_content("); + appendList(&schema, spec.nColumn, spec.azContentColumn); + append(&schema, ")"); + rc = sql_exec(db, spec.zDb, spec.zName, stringBufferData(&schema)); + stringBufferDestroy(&schema); + if( rc!=SQLITE_OK ) goto out; + + rc = sql_exec(db, spec.zDb, spec.zName, + "create table %_segments(block blob);"); + if( rc!=SQLITE_OK ) goto out; + + rc = sql_exec(db, spec.zDb, spec.zName, + "create table %_segdir(" + " level integer," + " idx integer," + " start_block integer," + " leaves_end_block integer," + " end_block integer," + " root blob," + " primary key(level, idx)" + ");"); + if( rc!=SQLITE_OK ) goto out; + + rc = constructVtab(db, (fts2Hash *)pAux, &spec, ppVTab, pzErr); + +out: + clearTableSpec(&spec); + return rc; +} + +/* Decide how to handle an SQL query. 
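+**
+** Roughly three plans are possible.  For the "email" example:
+**
+**     SELECT * FROM email;                          -> QUERY_GENERIC
+**     SELECT * FROM email WHERE rowid = 17;         -> QUERY_ROWID
+**     SELECT * FROM email WHERE body MATCH 'cat';   -> QUERY_FULLTEXT + 1
+**
+** where the value added to QUERY_FULLTEXT is the (0-indexed) number of
+** the column on the left-hand side of MATCH.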
*/ +static int fulltextBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ + int i; + TRACE(("FTS2 BestIndex\n")); + + for(i=0; inConstraint; ++i){ + const struct sqlite3_index_constraint *pConstraint; + pConstraint = &pInfo->aConstraint[i]; + if( pConstraint->usable ) { + if( pConstraint->iColumn==-1 && + pConstraint->op==SQLITE_INDEX_CONSTRAINT_EQ ){ + pInfo->idxNum = QUERY_ROWID; /* lookup by rowid */ + TRACE(("FTS2 QUERY_ROWID\n")); + } else if( pConstraint->iColumn>=0 && + pConstraint->op==SQLITE_INDEX_CONSTRAINT_MATCH ){ + /* full-text search */ + pInfo->idxNum = QUERY_FULLTEXT + pConstraint->iColumn; + TRACE(("FTS2 QUERY_FULLTEXT %d\n", pConstraint->iColumn)); + } else continue; + + pInfo->aConstraintUsage[i].argvIndex = 1; + pInfo->aConstraintUsage[i].omit = 1; + + /* An arbitrary value for now. + * TODO: Perhaps rowid matches should be considered cheaper than + * full-text searches. */ + pInfo->estimatedCost = 1.0; + + return SQLITE_OK; + } + } + pInfo->idxNum = QUERY_GENERIC; + return SQLITE_OK; +} + +static int fulltextDisconnect(sqlite3_vtab *pVTab){ + TRACE(("FTS2 Disconnect %p\n", pVTab)); + fulltext_vtab_destroy((fulltext_vtab *)pVTab); + return SQLITE_OK; +} + +static int fulltextDestroy(sqlite3_vtab *pVTab){ + fulltext_vtab *v = (fulltext_vtab *)pVTab; + int rc; + + TRACE(("FTS2 Destroy %p\n", pVTab)); + rc = sql_exec(v->db, v->zDb, v->zName, + "drop table if exists %_content;" + "drop table if exists %_segments;" + "drop table if exists %_segdir;" + ); + if( rc!=SQLITE_OK ) return rc; + + fulltext_vtab_destroy((fulltext_vtab *)pVTab); + return SQLITE_OK; +} + +static int fulltextOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ + fulltext_cursor *c; + + c = (fulltext_cursor *) calloc(sizeof(fulltext_cursor), 1); + /* sqlite will initialize c->base */ + *ppCursor = &c->base; + TRACE(("FTS2 Open %p: %p\n", pVTab, c)); + + return SQLITE_OK; +} + + +/* Free all of the dynamically allocated memory held by *q +*/ +static void queryClear(Query *q){ + int i; + for(i = 0; i < q->nTerms; ++i){ + free(q->pTerms[i].pTerm); + } + free(q->pTerms); + CLEAR(q); +} + +/* Free all of the dynamically allocated memory held by the +** Snippet +*/ +static void snippetClear(Snippet *p){ + free(p->aMatch); + free(p->zOffset); + free(p->zSnippet); + CLEAR(p); +} +/* +** Append a single entry to the p->aMatch[] log. +*/ +static void snippetAppendMatch( + Snippet *p, /* Append the entry to this snippet */ + int iCol, int iTerm, /* The column and query term */ + int iStart, int nByte /* Offset and size of the match */ +){ + int i; + struct snippetMatch *pMatch; + if( p->nMatch+1>=p->nAlloc ){ + p->nAlloc = p->nAlloc*2 + 10; + p->aMatch = realloc(p->aMatch, p->nAlloc*sizeof(p->aMatch[0]) ); + if( p->aMatch==0 ){ + p->nMatch = 0; + p->nAlloc = 0; + return; + } + } + i = p->nMatch++; + pMatch = &p->aMatch[i]; + pMatch->iCol = iCol; + pMatch->iTerm = iTerm; + pMatch->iStart = iStart; + pMatch->nByte = nByte; +} + +/* +** Sizing information for the circular buffer used in snippetOffsetsOfColumn() +*/ +#define FTS2_ROTOR_SZ (32) +#define FTS2_ROTOR_MASK (FTS2_ROTOR_SZ-1) + +/* +** Add entries to pSnippet->aMatch[] for every match that occurs against +** document zDoc[0..nDoc-1] which is stored in column iColumn. 
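+**
+** Each entry records (iCol, iTerm, iStart, nByte): the column, the index
+** of the matching query term, and the byte offset and length of the hit
+** in zDoc.  For instance, the query "cat dog" run against a body column
+** holding "a cat and a dog" should, roughly, append (1, 0, 2, 3) for
+** "cat" and (1, 1, 12, 3) for "dog".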
+*/ +static void snippetOffsetsOfColumn( + Query *pQuery, + Snippet *pSnippet, + int iColumn, + const char *zDoc, + int nDoc +){ + const sqlite3_tokenizer_module *pTModule; /* The tokenizer module */ + sqlite3_tokenizer *pTokenizer; /* The specific tokenizer */ + sqlite3_tokenizer_cursor *pTCursor; /* Tokenizer cursor */ + fulltext_vtab *pVtab; /* The full text index */ + int nColumn; /* Number of columns in the index */ + const QueryTerm *aTerm; /* Query string terms */ + int nTerm; /* Number of query string terms */ + int i, j; /* Loop counters */ + int rc; /* Return code */ + unsigned int match, prevMatch; /* Phrase search bitmasks */ + const char *zToken; /* Next token from the tokenizer */ + int nToken; /* Size of zToken */ + int iBegin, iEnd, iPos; /* Offsets of beginning and end */ + + /* The following variables keep a circular buffer of the last + ** few tokens */ + unsigned int iRotor = 0; /* Index of current token */ + int iRotorBegin[FTS2_ROTOR_SZ]; /* Beginning offset of token */ + int iRotorLen[FTS2_ROTOR_SZ]; /* Length of token */ + + pVtab = pQuery->pFts; + nColumn = pVtab->nColumn; + pTokenizer = pVtab->pTokenizer; + pTModule = pTokenizer->pModule; + rc = pTModule->xOpen(pTokenizer, zDoc, nDoc, &pTCursor); + if( rc ) return; + pTCursor->pTokenizer = pTokenizer; + aTerm = pQuery->pTerms; + nTerm = pQuery->nTerms; + if( nTerm>=FTS2_ROTOR_SZ ){ + nTerm = FTS2_ROTOR_SZ - 1; + } + prevMatch = 0; + while(1){ + rc = pTModule->xNext(pTCursor, &zToken, &nToken, &iBegin, &iEnd, &iPos); + if( rc ) break; + iRotorBegin[iRotor&FTS2_ROTOR_MASK] = iBegin; + iRotorLen[iRotor&FTS2_ROTOR_MASK] = iEnd-iBegin; + match = 0; + for(i=0; i=0 && iColnToken ) continue; + if( !aTerm[i].isPrefix && aTerm[i].nTerm1 && (prevMatch & (1<=0; j--){ + int k = (iRotor-j) & FTS2_ROTOR_MASK; + snippetAppendMatch(pSnippet, iColumn, i-j, + iRotorBegin[k], iRotorLen[k]); + } + } + } + prevMatch = match<<1; + iRotor++; + } + pTModule->xClose(pTCursor); +} + + +/* +** Compute all offsets for the current row of the query. +** If the offsets have already been computed, this routine is a no-op. +*/ +static void snippetAllOffsets(fulltext_cursor *p){ + int nColumn; + int iColumn, i; + int iFirst, iLast; + fulltext_vtab *pFts; + + if( p->snippet.nMatch ) return; + if( p->q.nTerms==0 ) return; + pFts = p->q.pFts; + nColumn = pFts->nColumn; + iColumn = (p->iCursorType - QUERY_FULLTEXT); + if( iColumn<0 || iColumn>=nColumn ){ + iFirst = 0; + iLast = nColumn-1; + }else{ + iFirst = iColumn; + iLast = iColumn; + } + for(i=iFirst; i<=iLast; i++){ + const char *zDoc; + int nDoc; + zDoc = (const char*)sqlite3_column_text(p->pStmt, i+1); + nDoc = sqlite3_column_bytes(p->pStmt, i+1); + snippetOffsetsOfColumn(&p->q, &p->snippet, i, zDoc, nDoc); + } +} + +/* +** Convert the information in the aMatch[] array of the snippet +** into the string zOffset[0..nOffset-1]. +*/ +static void snippetOffsetText(Snippet *p){ + int i; + int cnt = 0; + StringBuffer sb; + char zBuf[200]; + if( p->zOffset ) return; + initStringBuffer(&sb); + for(i=0; inMatch; i++){ + struct snippetMatch *pMatch = &p->aMatch[i]; + zBuf[0] = ' '; + sprintf(&zBuf[cnt>0], "%d %d %d %d", pMatch->iCol, + pMatch->iTerm, pMatch->iStart, pMatch->nByte); + append(&sb, zBuf); + cnt++; + } + p->zOffset = stringBufferData(&sb); + p->nOffset = stringBufferLength(&sb); +} + +/* +** zDoc[0..nDoc-1] is phrase of text. aMatch[0..nMatch-1] are a set +** of matching words some of which might be in zDoc. zDoc is column +** number iCol. 
+** +** iBreak is suggested spot in zDoc where we could begin or end an +** excerpt. Return a value similar to iBreak but possibly adjusted +** to be a little left or right so that the break point is better. +*/ +static int wordBoundary( + int iBreak, /* The suggested break point */ + const char *zDoc, /* Document text */ + int nDoc, /* Number of bytes in zDoc[] */ + struct snippetMatch *aMatch, /* Matching words */ + int nMatch, /* Number of entries in aMatch[] */ + int iCol /* The column number for zDoc[] */ +){ + int i; + if( iBreak<=10 ){ + return 0; + } + if( iBreak>=nDoc-10 ){ + return nDoc; + } + for(i=0; i0 && aMatch[i-1].iStart+aMatch[i-1].nByte>=iBreak ){ + return aMatch[i-1].iStart; + } + } + for(i=1; i<=10; i++){ + if( safe_isspace(zDoc[iBreak-i]) ){ + return iBreak - i + 1; + } + if( safe_isspace(zDoc[iBreak+i]) ){ + return iBreak + i + 1; + } + } + return iBreak; +} + + + +/* +** Allowed values for Snippet.aMatch[].snStatus +*/ +#define SNIPPET_IGNORE 0 /* It is ok to omit this match from the snippet */ +#define SNIPPET_DESIRED 1 /* We want to include this match in the snippet */ + +/* +** Generate the text of a snippet. +*/ +static void snippetText( + fulltext_cursor *pCursor, /* The cursor we need the snippet for */ + const char *zStartMark, /* Markup to appear before each match */ + const char *zEndMark, /* Markup to appear after each match */ + const char *zEllipsis /* Ellipsis mark */ +){ + int i, j; + struct snippetMatch *aMatch; + int nMatch; + int nDesired; + StringBuffer sb; + int tailCol; + int tailOffset; + int iCol; + int nDoc; + const char *zDoc; + int iStart, iEnd; + int tailEllipsis = 0; + int iMatch; + + + free(pCursor->snippet.zSnippet); + pCursor->snippet.zSnippet = 0; + aMatch = pCursor->snippet.aMatch; + nMatch = pCursor->snippet.nMatch; + initStringBuffer(&sb); + + for(i=0; iq.nTerms; i++){ + for(j=0; j0; i++){ + if( aMatch[i].snStatus!=SNIPPET_DESIRED ) continue; + nDesired--; + iCol = aMatch[i].iCol; + zDoc = (const char*)sqlite3_column_text(pCursor->pStmt, iCol+1); + nDoc = sqlite3_column_bytes(pCursor->pStmt, iCol+1); + iStart = aMatch[i].iStart - 40; + iStart = wordBoundary(iStart, zDoc, nDoc, aMatch, nMatch, iCol); + if( iStart<=10 ){ + iStart = 0; + } + if( iCol==tailCol && iStart<=tailOffset+20 ){ + iStart = tailOffset; + } + if( (iCol!=tailCol && tailCol>=0) || iStart!=tailOffset ){ + trimWhiteSpace(&sb); + appendWhiteSpace(&sb); + append(&sb, zEllipsis); + appendWhiteSpace(&sb); + } + iEnd = aMatch[i].iStart + aMatch[i].nByte + 40; + iEnd = wordBoundary(iEnd, zDoc, nDoc, aMatch, nMatch, iCol); + if( iEnd>=nDoc-10 ){ + iEnd = nDoc; + tailEllipsis = 0; + }else{ + tailEllipsis = 1; + } + while( iMatchsnippet.zSnippet = stringBufferData(&sb); + pCursor->snippet.nSnippet = stringBufferLength(&sb); +} + + +/* +** Close the cursor. For additional information see the documentation +** on the xClose method of the virtual table interface. +*/ +static int fulltextClose(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + TRACE(("FTS2 Close %p\n", c)); + sqlite3_finalize(c->pStmt); + queryClear(&c->q); + snippetClear(&c->snippet); + if( c->result.nData!=0 ) dlrDestroy(&c->reader); + dataBufferDestroy(&c->result); + free(c); + return SQLITE_OK; +} + +static int fulltextNext(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + int rc; + + TRACE(("FTS2 Next %p\n", pCursor)); + snippetClear(&c->snippet); + if( c->iCursorType < QUERY_FULLTEXT ){ + /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. 
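+    ** (One plausible shape for that handling: on SQLITE_BUSY, reset and
+    ** retry the step a bounded number of times; on SQLITE_SCHEMA,
+    ** re-prepare via sql_prepare() and re-bind before stepping again.
+    ** Neither is done yet.)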
*/ + rc = sqlite3_step(c->pStmt); + switch( rc ){ + case SQLITE_ROW: + c->eof = 0; + return SQLITE_OK; + case SQLITE_DONE: + c->eof = 1; + return SQLITE_OK; + default: + c->eof = 1; + return rc; + } + } else { /* full-text query */ + rc = sqlite3_reset(c->pStmt); + if( rc!=SQLITE_OK ) return rc; + + if( c->result.nData==0 || dlrAtEnd(&c->reader) ){ + c->eof = 1; + return SQLITE_OK; + } + rc = sqlite3_bind_int64(c->pStmt, 1, dlrDocid(&c->reader)); + dlrStep(&c->reader); + if( rc!=SQLITE_OK ) return rc; + /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ + rc = sqlite3_step(c->pStmt); + if( rc==SQLITE_ROW ){ /* the case we expect */ + c->eof = 0; + return SQLITE_OK; + } + /* an error occurred; abort */ + return rc==SQLITE_DONE ? SQLITE_ERROR : rc; + } +} + + +/* TODO(shess) If we pushed LeafReader to the top of the file, or to +** another file, term_select() could be pushed above +** docListOfTerm(). +*/ +static int termSelect(fulltext_vtab *v, int iColumn, + const char *pTerm, int nTerm, int isPrefix, + DocListType iType, DataBuffer *out); + +/* Return a DocList corresponding to the query term *pTerm. If *pTerm +** is the first term of a phrase query, go ahead and evaluate the phrase +** query and return the doclist for the entire phrase query. +** +** The resulting DL_DOCIDS doclist is stored in pResult, which is +** overwritten. +*/ +static int docListOfTerm( + fulltext_vtab *v, /* The full text index */ + int iColumn, /* column to restrict to. No restriction if >=nColumn */ + QueryTerm *pQTerm, /* Term we are looking for, or 1st term of a phrase */ + DataBuffer *pResult /* Write the result here */ +){ + DataBuffer left, right, new; + int i, rc; + + /* No phrase search if no position info. */ + assert( pQTerm->nPhrase==0 || DL_DEFAULT!=DL_DOCIDS ); + + /* This code should never be called with buffered updates. */ + assert( v->nPendingData<0 ); + + dataBufferInit(&left, 0); + rc = termSelect(v, iColumn, pQTerm->pTerm, pQTerm->nTerm, pQTerm->isPrefix, + 0nPhrase ? DL_POSITIONS : DL_DOCIDS, &left); + if( rc ) return rc; + for(i=1; i<=pQTerm->nPhrase && left.nData>0; i++){ + dataBufferInit(&right, 0); + rc = termSelect(v, iColumn, pQTerm[i].pTerm, pQTerm[i].nTerm, + pQTerm[i].isPrefix, DL_POSITIONS, &right); + if( rc ){ + dataBufferDestroy(&left); + return rc; + } + dataBufferInit(&new, 0); + docListPhraseMerge(left.pData, left.nData, right.pData, right.nData, + inPhrase ? DL_POSITIONS : DL_DOCIDS, &new); + dataBufferDestroy(&left); + dataBufferDestroy(&right); + left = new; + } + *pResult = left; + return SQLITE_OK; +} + +/* Add a new term pTerm[0..nTerm-1] to the query *q. +*/ +static void queryAdd(Query *q, const char *pTerm, int nTerm){ + QueryTerm *t; + ++q->nTerms; + q->pTerms = realloc(q->pTerms, q->nTerms * sizeof(q->pTerms[0])); + if( q->pTerms==0 ){ + q->nTerms = 0; + return; + } + t = &q->pTerms[q->nTerms - 1]; + CLEAR(t); + t->pTerm = malloc(nTerm+1); + memcpy(t->pTerm, pTerm, nTerm); + t->pTerm[nTerm] = 0; + t->nTerm = nTerm; + t->isOr = q->nextIsOr; + t->isPrefix = 0; + q->nextIsOr = 0; + t->iColumn = q->nextColumn; + q->nextColumn = q->dfltColumn; +} + +/* +** Check to see if the string zToken[0...nToken-1] matches any +** column name in the virtual table. If it does, +** return the zero-indexed column number. If not, return -1. 
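+**
+** For the "email" example: "subject" maps to 0, "body" to 1, and an
+** unrecognized name such as "sender" yields -1.  The comparison is
+** byte-for-byte (memcmp), so it is case-sensitive.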
+*/ +static int checkColumnSpecifier( + fulltext_vtab *pVtab, /* The virtual table */ + const char *zToken, /* Text of the token */ + int nToken /* Number of characters in the token */ +){ + int i; + for(i=0; inColumn; i++){ + if( memcmp(pVtab->azColumn[i], zToken, nToken)==0 + && pVtab->azColumn[i][nToken]==0 ){ + return i; + } + } + return -1; +} + +/* +** Parse the text at pSegment[0..nSegment-1]. Add additional terms +** to the query being assemblied in pQuery. +** +** inPhrase is true if pSegment[0..nSegement-1] is contained within +** double-quotes. If inPhrase is true, then the first term +** is marked with the number of terms in the phrase less one and +** OR and "-" syntax is ignored. If inPhrase is false, then every +** term found is marked with nPhrase=0 and OR and "-" syntax is significant. +*/ +static int tokenizeSegment( + sqlite3_tokenizer *pTokenizer, /* The tokenizer to use */ + const char *pSegment, int nSegment, /* Query expression being parsed */ + int inPhrase, /* True if within "..." */ + Query *pQuery /* Append results here */ +){ + const sqlite3_tokenizer_module *pModule = pTokenizer->pModule; + sqlite3_tokenizer_cursor *pCursor; + int firstIndex = pQuery->nTerms; + int iCol; + int nTerm = 1; + + int rc = pModule->xOpen(pTokenizer, pSegment, nSegment, &pCursor); + if( rc!=SQLITE_OK ) return rc; + pCursor->pTokenizer = pTokenizer; + + while( 1 ){ + const char *pToken; + int nToken, iBegin, iEnd, iPos; + + rc = pModule->xNext(pCursor, + &pToken, &nToken, + &iBegin, &iEnd, &iPos); + if( rc!=SQLITE_OK ) break; + if( !inPhrase && + pSegment[iEnd]==':' && + (iCol = checkColumnSpecifier(pQuery->pFts, pToken, nToken))>=0 ){ + pQuery->nextColumn = iCol; + continue; + } + if( !inPhrase && pQuery->nTerms>0 && nToken==2 + && pSegment[iBegin]=='O' && pSegment[iBegin+1]=='R' ){ + pQuery->nextIsOr = 1; + continue; + } + queryAdd(pQuery, pToken, nToken); + if( !inPhrase && iBegin>0 && pSegment[iBegin-1]=='-' ){ + pQuery->pTerms[pQuery->nTerms-1].isNot = 1; + } + if( iEndpTerms[pQuery->nTerms-1].isPrefix = 1; + } + pQuery->pTerms[pQuery->nTerms-1].iPhrase = nTerm; + if( inPhrase ){ + nTerm++; + } + } + + if( inPhrase && pQuery->nTerms>firstIndex ){ + pQuery->pTerms[firstIndex].nPhrase = pQuery->nTerms - firstIndex - 1; + } + + return pModule->xClose(pCursor); +} + +/* Parse a query string, yielding a Query object pQuery. +** +** The calling function will need to queryClear() to clean up +** the dynamically allocated memory held by pQuery. +*/ +static int parseQuery( + fulltext_vtab *v, /* The fulltext index */ + const char *zInput, /* Input text of the query string */ + int nInput, /* Size of the input text */ + int dfltColumn, /* Default column of the index to match against */ + Query *pQuery /* Write the parse results here. */ +){ + int iInput, inPhrase = 0; + + if( zInput==0 ) nInput = 0; + if( nInput<0 ) nInput = strlen(zInput); + pQuery->nTerms = 0; + pQuery->pTerms = NULL; + pQuery->nextIsOr = 0; + pQuery->nextColumn = dfltColumn; + pQuery->dfltColumn = dfltColumn; + pQuery->pFts = v; + + for(iInput=0; iInputiInput ){ + tokenizeSegment(v->pTokenizer, zInput+iInput, i-iInput, inPhrase, + pQuery); + } + iInput = i; + if( i=nColumn +** they are allowed to match against any column. 
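+**
+** As an informal example of the syntax handled here, the query string
+**
+**     subject:report "cat food" OR dog -mouse
+**
+** should parse into a term "report" restricted to the subject column, a
+** two-word phrase "cat food", a term "dog" with isOr set (merged into the
+** phrase's doclist via docListOrMerge), and a term "mouse" with isNot
+** set, which is subtracted at the end via docListExceptMerge.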
+*/ +static int fulltextQuery( + fulltext_vtab *v, /* The full text index */ + int iColumn, /* Match against this column by default */ + const char *zInput, /* The query string */ + int nInput, /* Number of bytes in zInput[] */ + DataBuffer *pResult, /* Write the result doclist here */ + Query *pQuery /* Put parsed query string here */ +){ + int i, iNext, rc; + DataBuffer left, right, or, new; + int nNot = 0; + QueryTerm *aTerm; + + /* TODO(shess) Instead of flushing pendingTerms, we could query for + ** the relevant term and merge the doclist into what we receive from + ** the database. Wait and see if this is a common issue, first. + ** + ** A good reason not to flush is to not generate update-related + ** error codes from here. + */ + + /* Flush any buffered updates before executing the query. */ + rc = flushPendingTerms(v); + if( rc!=SQLITE_OK ) return rc; + + /* TODO(shess) I think that the queryClear() calls below are not + ** necessary, because fulltextClose() already clears the query. + */ + rc = parseQuery(v, zInput, nInput, iColumn, pQuery); + if( rc!=SQLITE_OK ) return rc; + + /* Empty or NULL queries return no results. */ + if( pQuery->nTerms==0 ){ + dataBufferInit(pResult, 0); + return SQLITE_OK; + } + + /* Merge AND terms. */ + /* TODO(shess) I think we can early-exit if( i>nNot && left.nData==0 ). */ + aTerm = pQuery->pTerms; + for(i = 0; inTerms; i=iNext){ + if( aTerm[i].isNot ){ + /* Handle all NOT terms in a separate pass */ + nNot++; + iNext = i + aTerm[i].nPhrase+1; + continue; + } + iNext = i + aTerm[i].nPhrase + 1; + rc = docListOfTerm(v, aTerm[i].iColumn, &aTerm[i], &right); + if( rc ){ + if( i!=nNot ) dataBufferDestroy(&left); + queryClear(pQuery); + return rc; + } + while( iNextnTerms && aTerm[iNext].isOr ){ + rc = docListOfTerm(v, aTerm[iNext].iColumn, &aTerm[iNext], &or); + iNext += aTerm[iNext].nPhrase + 1; + if( rc ){ + if( i!=nNot ) dataBufferDestroy(&left); + dataBufferDestroy(&right); + queryClear(pQuery); + return rc; + } + dataBufferInit(&new, 0); + docListOrMerge(right.pData, right.nData, or.pData, or.nData, &new); + dataBufferDestroy(&right); + dataBufferDestroy(&or); + right = new; + } + if( i==nNot ){ /* first term processed. */ + left = right; + }else{ + dataBufferInit(&new, 0); + docListAndMerge(left.pData, left.nData, right.pData, right.nData, &new); + dataBufferDestroy(&right); + dataBufferDestroy(&left); + left = new; + } + } + + if( nNot==pQuery->nTerms ){ + /* We do not yet know how to handle a query of only NOT terms */ + return SQLITE_ERROR; + } + + /* Do the EXCEPT terms */ + for(i=0; inTerms; i += aTerm[i].nPhrase + 1){ + if( !aTerm[i].isNot ) continue; + rc = docListOfTerm(v, aTerm[i].iColumn, &aTerm[i], &right); + if( rc ){ + queryClear(pQuery); + dataBufferDestroy(&left); + return rc; + } + dataBufferInit(&new, 0); + docListExceptMerge(left.pData, left.nData, right.pData, right.nData, &new); + dataBufferDestroy(&right); + dataBufferDestroy(&left); + left = new; + } + + *pResult = left; + return rc; +} + +/* +** This is the xFilter interface for the virtual table. See +** the virtual table xFilter method documentation for additional +** information. +** +** If idxNum==QUERY_GENERIC then do a full table scan against +** the %_content table. +** +** If idxNum==QUERY_ROWID then do a rowid lookup for a single entry +** in the %_content table. +** +** If idxNum>=QUERY_FULLTEXT then use the full text index. The +** column on the left-hand side of the MATCH operator is column +** number idxNum-QUERY_FULLTEXT, 0 indexed. 
argv[0] is the right-hand +** side of the MATCH operator. +*/ +/* TODO(shess) Upgrade the cursor initialization and destruction to +** account for fulltextFilter() being called multiple times on the +** same cursor. The current solution is very fragile. Apply fix to +** fts2 as appropriate. +*/ +static int fulltextFilter( + sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */ + int idxNum, const char *idxStr, /* Which indexing scheme to use */ + int argc, sqlite3_value **argv /* Arguments for the indexing scheme */ +){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + fulltext_vtab *v = cursor_vtab(c); + int rc; + char *zSql; + + TRACE(("FTS2 Filter %p\n",pCursor)); + + zSql = sqlite3_mprintf("select rowid, * from %%_content %s", + idxNum==QUERY_GENERIC ? "" : "where rowid=?"); + sqlite3_finalize(c->pStmt); + rc = sql_prepare(v->db, v->zDb, v->zName, &c->pStmt, zSql); + sqlite3_free(zSql); + if( rc!=SQLITE_OK ) return rc; + + c->iCursorType = idxNum; + switch( idxNum ){ + case QUERY_GENERIC: + break; + + case QUERY_ROWID: + rc = sqlite3_bind_int64(c->pStmt, 1, sqlite3_value_int64(argv[0])); + if( rc!=SQLITE_OK ) return rc; + break; + + default: /* full-text search */ + { + const char *zQuery = (const char *)sqlite3_value_text(argv[0]); + assert( idxNum<=QUERY_FULLTEXT+v->nColumn); + assert( argc==1 ); + queryClear(&c->q); + if( c->result.nData!=0 ){ + /* This case happens if the same cursor is used repeatedly. */ + dlrDestroy(&c->reader); + dataBufferReset(&c->result); + }else{ + dataBufferInit(&c->result, 0); + } + rc = fulltextQuery(v, idxNum-QUERY_FULLTEXT, zQuery, -1, &c->result, &c->q); + if( rc!=SQLITE_OK ) return rc; + if( c->result.nData!=0 ){ + dlrInit(&c->reader, DL_DOCIDS, c->result.pData, c->result.nData); + } + break; + } + } + + return fulltextNext(pCursor); +} + +/* This is the xEof method of the virtual table. The SQLite core +** calls this routine to find out if it has reached the end of +** a query's results set. +*/ +static int fulltextEof(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + return c->eof; +} + +/* This is the xColumn method of the virtual table. The SQLite +** core calls this method during a query when it needs the value +** of a column from the virtual table. This method needs to use +** one of the sqlite3_result_*() routines to store the requested +** value back in the pContext. +*/ +static int fulltextColumn(sqlite3_vtab_cursor *pCursor, + sqlite3_context *pContext, int idxCol){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + fulltext_vtab *v = cursor_vtab(c); + + if( idxColnColumn ){ + sqlite3_value *pVal = sqlite3_column_value(c->pStmt, idxCol+1); + sqlite3_result_value(pContext, pVal); + }else if( idxCol==v->nColumn ){ + /* The extra column whose name is the same as the table. + ** Return a blob which is a pointer to the cursor + */ + sqlite3_result_blob(pContext, &c, sizeof(c), SQLITE_TRANSIENT); + } + return SQLITE_OK; +} + +/* This is the xRowid method. The SQLite core calls this routine to +** retrive the rowid for the current row of the result set. The +** rowid should be written to *pRowid. +*/ +static int fulltextRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + + *pRowid = sqlite3_column_int64(c->pStmt, 0); + return SQLITE_OK; +} + +/* Add all terms in [zText] to pendingTerms table. If [iColumn] > 0, +** we also store positions and offsets in the hash table using that +** column number. 
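+**
+** Sketch of the effect: indexing docid 7 with zText = "a cat and a cat"
+** for column 0 leaves pendingTerms keyed by "a", "cat" and "and", where
+** the "cat" entry's DLCollector carries docid 7 plus one
+** (column, position, startOffset, endOffset) tuple per occurrence.
+** deleteTerms() calls this with iColumn = -1, so only the bare docid is
+** recorded there (an empty doclist entry, no position data).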
+*/ +static int buildTerms(fulltext_vtab *v, sqlite_int64 iDocid, + const char *zText, int iColumn){ + sqlite3_tokenizer *pTokenizer = v->pTokenizer; + sqlite3_tokenizer_cursor *pCursor; + const char *pToken; + int nTokenBytes; + int iStartOffset, iEndOffset, iPosition; + int rc; + + rc = pTokenizer->pModule->xOpen(pTokenizer, zText, -1, &pCursor); + if( rc!=SQLITE_OK ) return rc; + + pCursor->pTokenizer = pTokenizer; + while( SQLITE_OK==pTokenizer->pModule->xNext(pCursor, + &pToken, &nTokenBytes, + &iStartOffset, &iEndOffset, + &iPosition) ){ + DLCollector *p; + int nData; /* Size of doclist before our update. */ + + /* Positions can't be negative; we use -1 as a terminator internally. */ + if( iPosition<0 ){ + pTokenizer->pModule->xClose(pCursor); + return SQLITE_ERROR; + } + + p = fts2HashFind(&v->pendingTerms, pToken, nTokenBytes); + if( p==NULL ){ + nData = 0; + p = dlcNew(iDocid, DL_DEFAULT); + fts2HashInsert(&v->pendingTerms, pToken, nTokenBytes, p); + + /* Overhead for our hash table entry, the key, and the value. */ + v->nPendingData += sizeof(struct fts2HashElem)+sizeof(*p)+nTokenBytes; + }else{ + nData = p->b.nData; + if( p->dlw.iPrevDocid!=iDocid ) dlcNext(p, iDocid); + } + if( iColumn>=0 ){ + dlcAddPos(p, iColumn, iPosition, iStartOffset, iEndOffset); + } + + /* Accumulate data added by dlcNew or dlcNext, and dlcAddPos. */ + v->nPendingData += p->b.nData-nData; + } + + /* TODO(shess) Check return? Should this be able to cause errors at + ** this point? Actually, same question about sqlite3_finalize(), + ** though one could argue that failure there means that the data is + ** not durable. *ponder* + */ + pTokenizer->pModule->xClose(pCursor); + return rc; +} + +/* Add doclists for all terms in [pValues] to pendingTerms table. */ +static int insertTerms(fulltext_vtab *v, sqlite_int64 iRowid, + sqlite3_value **pValues){ + int i; + for(i = 0; i < v->nColumn ; ++i){ + char *zText = (char*)sqlite3_value_text(pValues[i]); + int rc = buildTerms(v, iRowid, zText, i); + if( rc!=SQLITE_OK ) return rc; + } + return SQLITE_OK; +} + +/* Add empty doclists for all terms in the given row's content to +** pendingTerms. +*/ +static int deleteTerms(fulltext_vtab *v, sqlite_int64 iRowid){ + const char **pValues; + int i, rc; + + /* TODO(shess) Should we allow such tables at all? */ + if( DL_DEFAULT==DL_DOCIDS ) return SQLITE_ERROR; + + rc = content_select(v, iRowid, &pValues); + if( rc!=SQLITE_OK ) return rc; + + for(i = 0 ; i < v->nColumn; ++i) { + rc = buildTerms(v, iRowid, pValues[i], -1); + if( rc!=SQLITE_OK ) break; + } + + freeStringArray(v->nColumn, pValues); + return SQLITE_OK; +} + +/* TODO(shess) Refactor the code to remove this forward decl. */ +static int initPendingTerms(fulltext_vtab *v, sqlite_int64 iDocid); + +/* Insert a row into the %_content table; set *piRowid to be the ID of the +** new row. Add doclists for terms to pendingTerms. +*/ +static int index_insert(fulltext_vtab *v, sqlite3_value *pRequestRowid, + sqlite3_value **pValues, sqlite_int64 *piRowid){ + int rc; + + rc = content_insert(v, pRequestRowid, pValues); /* execute an SQL INSERT */ + if( rc!=SQLITE_OK ) return rc; + + *piRowid = sqlite3_last_insert_rowid(v->db); + rc = initPendingTerms(v, *piRowid); + if( rc!=SQLITE_OK ) return rc; + + return insertTerms(v, *piRowid, pValues); +} + +/* Delete a row from the %_content table; add empty doclists for terms +** to pendingTerms. 
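+**
+** The empty doclists act as deletion markers: newer segments shadow older
+** ones during merges, so a docid that reappears with no position data
+** cancels that docid's entries from older segments without rewriting
+** them in place.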
+*/ +static int index_delete(fulltext_vtab *v, sqlite_int64 iRow){ + int rc = initPendingTerms(v, iRow); + if( rc!=SQLITE_OK ) return rc; + + rc = deleteTerms(v, iRow); + if( rc!=SQLITE_OK ) return rc; + + return content_delete(v, iRow); /* execute an SQL DELETE */ +} + +/* Update a row in the %_content table; add delete doclists to +** pendingTerms for old terms not in the new data, add insert doclists +** to pendingTerms for terms in the new data. +*/ +static int index_update(fulltext_vtab *v, sqlite_int64 iRow, + sqlite3_value **pValues){ + int rc = initPendingTerms(v, iRow); + if( rc!=SQLITE_OK ) return rc; + + /* Generate an empty doclist for each term that previously appeared in this + * row. */ + rc = deleteTerms(v, iRow); + if( rc!=SQLITE_OK ) return rc; + + rc = content_update(v, pValues, iRow); /* execute an SQL UPDATE */ + if( rc!=SQLITE_OK ) return rc; + + /* Now add positions for terms which appear in the updated row. */ + return insertTerms(v, iRow, pValues); +} + +/*******************************************************************/ +/* InteriorWriter is used to collect terms and block references into +** interior nodes in %_segments. See commentary at top of file for +** format. +*/ + +/* How large interior nodes can grow. */ +#define INTERIOR_MAX 2048 + +/* Minimum number of terms per interior node (except the root). This +** prevents large terms from making the tree too skinny - must be >0 +** so that the tree always makes progress. Note that the min tree +** fanout will be INTERIOR_MIN_TERMS+1. +*/ +#define INTERIOR_MIN_TERMS 7 +#if INTERIOR_MIN_TERMS<1 +# error INTERIOR_MIN_TERMS must be greater than 0. +#endif + +/* ROOT_MAX controls how much data is stored inline in the segment +** directory. +*/ +/* TODO(shess) Push ROOT_MAX down to whoever is writing things. It's +** only here so that interiorWriterRootInfo() and leafWriterRootInfo() +** can both see it, but if the caller passed it in, we wouldn't even +** need a define. +*/ +#define ROOT_MAX 1024 +#if ROOT_MAXterm, 0); + dataBufferReplace(&block->term, pTerm, nTerm); + + n = putVarint(c, iHeight); + n += putVarint(c+n, iChildBlock); + dataBufferInit(&block->data, INTERIOR_MAX); + dataBufferReplace(&block->data, c, n); + + return block; +} + +#ifndef NDEBUG +/* Verify that the data is readable as an interior node. */ +static void interiorBlockValidate(InteriorBlock *pBlock){ + const char *pData = pBlock->data.pData; + int nData = pBlock->data.nData; + int n, iDummy; + sqlite_int64 iBlockid; + + assert( nData>0 ); + assert( pData!=0 ); + assert( pData+nData>pData ); + + /* Must lead with height of node as a varint(n), n>0 */ + n = getVarint32(pData, &iDummy); + assert( n>0 ); + assert( iDummy>0 ); + assert( n0 ); + assert( n<=nData ); + pData += n; + nData -= n; + + /* Zero or more terms of positive length */ + if( nData!=0 ){ + /* First term is not delta-encoded. */ + n = getVarint32(pData, &iDummy); + assert( n>0 ); + assert( iDummy>0 ); + assert( n+iDummy>0); + assert( n+iDummy<=nData ); + pData += n+iDummy; + nData -= n+iDummy; + + /* Following terms delta-encoded. */ + while( nData!=0 ){ + /* Length of shared prefix. 
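+      ** (Each later term is stored as varint(nPrefix), varint(nSuffix),
+      ** then the suffix bytes; e.g. "candy" following "candle" would be
+      ** encoded as prefix length 4 plus the suffix "y".)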
*/ + n = getVarint32(pData, &iDummy); + assert( n>0 ); + assert( iDummy>=0 ); + assert( n0 ); + assert( iDummy>0 ); + assert( n+iDummy>0); + assert( n+iDummy<=nData ); + pData += n+iDummy; + nData -= n+iDummy; + } + } +} +#define ASSERT_VALID_INTERIOR_BLOCK(x) interiorBlockValidate(x) +#else +#define ASSERT_VALID_INTERIOR_BLOCK(x) assert( 1 ) +#endif + +typedef struct InteriorWriter { + int iHeight; /* from 0 at leaves. */ + InteriorBlock *first, *last; + struct InteriorWriter *parentWriter; + + DataBuffer term; /* Last term written to block "last". */ + sqlite_int64 iOpeningChildBlock; /* First child block in block "last". */ +#ifndef NDEBUG + sqlite_int64 iLastChildBlock; /* for consistency checks. */ +#endif +} InteriorWriter; + +/* Initialize an interior node where pTerm[nTerm] marks the leftmost +** term in the tree. iChildBlock is the leftmost child block at the +** next level down the tree. +*/ +static void interiorWriterInit(int iHeight, const char *pTerm, int nTerm, + sqlite_int64 iChildBlock, + InteriorWriter *pWriter){ + InteriorBlock *block; + assert( iHeight>0 ); + CLEAR(pWriter); + + pWriter->iHeight = iHeight; + pWriter->iOpeningChildBlock = iChildBlock; +#ifndef NDEBUG + pWriter->iLastChildBlock = iChildBlock; +#endif + block = interiorBlockNew(iHeight, iChildBlock, pTerm, nTerm); + pWriter->last = pWriter->first = block; + ASSERT_VALID_INTERIOR_BLOCK(pWriter->last); + dataBufferInit(&pWriter->term, 0); +} + +/* Append the child node rooted at iChildBlock to the interior node, +** with pTerm[nTerm] as the leftmost term in iChildBlock's subtree. +*/ +static void interiorWriterAppend(InteriorWriter *pWriter, + const char *pTerm, int nTerm, + sqlite_int64 iChildBlock){ + char c[VARINT_MAX+VARINT_MAX]; + int n, nPrefix = 0; + + ASSERT_VALID_INTERIOR_BLOCK(pWriter->last); + + /* The first term written into an interior node is actually + ** associated with the second child added (the first child was added + ** in interiorWriterInit, or in the if clause at the bottom of this + ** function). That term gets encoded straight up, with nPrefix left + ** at 0. + */ + if( pWriter->term.nData==0 ){ + n = putVarint(c, nTerm); + }else{ + while( nPrefixterm.nData && + pTerm[nPrefix]==pWriter->term.pData[nPrefix] ){ + nPrefix++; + } + + n = putVarint(c, nPrefix); + n += putVarint(c+n, nTerm-nPrefix); + } + +#ifndef NDEBUG + pWriter->iLastChildBlock++; +#endif + assert( pWriter->iLastChildBlock==iChildBlock ); + + /* Overflow to a new block if the new term makes the current block + ** too big, and the current block already has enough terms. + */ + if( pWriter->last->data.nData+n+nTerm-nPrefix>INTERIOR_MAX && + iChildBlock-pWriter->iOpeningChildBlock>INTERIOR_MIN_TERMS ){ + pWriter->last->next = interiorBlockNew(pWriter->iHeight, iChildBlock, + pTerm, nTerm); + pWriter->last = pWriter->last->next; + pWriter->iOpeningChildBlock = iChildBlock; + dataBufferReset(&pWriter->term); + }else{ + dataBufferAppend2(&pWriter->last->data, c, n, + pTerm+nPrefix, nTerm-nPrefix); + dataBufferReplace(&pWriter->term, pTerm, nTerm); + } + ASSERT_VALID_INTERIOR_BLOCK(pWriter->last); +} + +/* Free the space used by pWriter, including the linked-list of +** InteriorBlocks, and parentWriter, if present. 
+*/ +static int interiorWriterDestroy(InteriorWriter *pWriter){ + InteriorBlock *block = pWriter->first; + + while( block!=NULL ){ + InteriorBlock *b = block; + block = block->next; + dataBufferDestroy(&b->term); + dataBufferDestroy(&b->data); + free(b); + } + if( pWriter->parentWriter!=NULL ){ + interiorWriterDestroy(pWriter->parentWriter); + free(pWriter->parentWriter); + } + dataBufferDestroy(&pWriter->term); + SCRAMBLE(pWriter); + return SQLITE_OK; +} + +/* If pWriter can fit entirely in ROOT_MAX, return it as the root info +** directly, leaving *piEndBlockid unchanged. Otherwise, flush +** pWriter to %_segments, building a new layer of interior nodes, and +** recursively ask for their root into. +*/ +static int interiorWriterRootInfo(fulltext_vtab *v, InteriorWriter *pWriter, + char **ppRootInfo, int *pnRootInfo, + sqlite_int64 *piEndBlockid){ + InteriorBlock *block = pWriter->first; + sqlite_int64 iBlockid = 0; + int rc; + + /* If we can fit the segment inline */ + if( block==pWriter->last && block->data.nDatadata.pData; + *pnRootInfo = block->data.nData; + return SQLITE_OK; + } + + /* Flush the first block to %_segments, and create a new level of + ** interior node. + */ + ASSERT_VALID_INTERIOR_BLOCK(block); + rc = block_insert(v, block->data.pData, block->data.nData, &iBlockid); + if( rc!=SQLITE_OK ) return rc; + *piEndBlockid = iBlockid; + + pWriter->parentWriter = malloc(sizeof(*pWriter->parentWriter)); + interiorWriterInit(pWriter->iHeight+1, + block->term.pData, block->term.nData, + iBlockid, pWriter->parentWriter); + + /* Flush additional blocks and append to the higher interior + ** node. + */ + for(block=block->next; block!=NULL; block=block->next){ + ASSERT_VALID_INTERIOR_BLOCK(block); + rc = block_insert(v, block->data.pData, block->data.nData, &iBlockid); + if( rc!=SQLITE_OK ) return rc; + *piEndBlockid = iBlockid; + + interiorWriterAppend(pWriter->parentWriter, + block->term.pData, block->term.nData, iBlockid); + } + + /* Parent node gets the chance to be the root. */ + return interiorWriterRootInfo(v, pWriter->parentWriter, + ppRootInfo, pnRootInfo, piEndBlockid); +} + +/****************************************************************/ +/* InteriorReader is used to read off the data from an interior node +** (see comment at top of file for the format). +*/ +typedef struct InteriorReader { + const char *pData; + int nData; + + DataBuffer term; /* previous term, for decoding term delta. */ + + sqlite_int64 iBlockid; +} InteriorReader; + +static void interiorReaderDestroy(InteriorReader *pReader){ + dataBufferDestroy(&pReader->term); + SCRAMBLE(pReader); +} + +/* TODO(shess) The assertions are great, but what if we're in NDEBUG +** and the blob is empty or otherwise contains suspect data? +*/ +static void interiorReaderInit(const char *pData, int nData, + InteriorReader *pReader){ + int n, nTerm; + + /* Require at least the leading flag byte */ + assert( nData>0 ); + assert( pData[0]!='\0' ); + + CLEAR(pReader); + + /* Decode the base blockid, and set the cursor to the first term. */ + n = getVarint(pData+1, &pReader->iBlockid); + assert( 1+n<=nData ); + pReader->pData = pData+1+n; + pReader->nData = nData-(1+n); + + /* A single-child interior node (such as when a leaf node was too + ** large for the segment directory) won't have any terms. + ** Otherwise, decode the first term. 
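+  **
+  ** For reference, the node decoded here was produced by
+  ** interiorBlockNew()/interiorWriterAppend() and looks roughly like:
+  **
+  **     varint(iHeight) varint(iLeftmostChildBlockid)
+  **     varint(nTerm) term0
+  **     varint(nPrefix) varint(nSuffix) suffix1 ...
+  **
+  ** Child blockids are consecutive starting from the leftmost child,
+  ** which is why interiorReaderStep() can simply increment iBlockid.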
+ */ + if( pReader->nData==0 ){ + dataBufferInit(&pReader->term, 0); + }else{ + n = getVarint32(pReader->pData, &nTerm); + dataBufferInit(&pReader->term, nTerm); + dataBufferReplace(&pReader->term, pReader->pData+n, nTerm); + assert( n+nTerm<=pReader->nData ); + pReader->pData += n+nTerm; + pReader->nData -= n+nTerm; + } +} + +static int interiorReaderAtEnd(InteriorReader *pReader){ + return pReader->term.nData==0; +} + +static sqlite_int64 interiorReaderCurrentBlockid(InteriorReader *pReader){ + return pReader->iBlockid; +} + +static int interiorReaderTermBytes(InteriorReader *pReader){ + assert( !interiorReaderAtEnd(pReader) ); + return pReader->term.nData; +} +static const char *interiorReaderTerm(InteriorReader *pReader){ + assert( !interiorReaderAtEnd(pReader) ); + return pReader->term.pData; +} + +/* Step forward to the next term in the node. */ +static void interiorReaderStep(InteriorReader *pReader){ + assert( !interiorReaderAtEnd(pReader) ); + + /* If the last term has been read, signal eof, else construct the + ** next term. + */ + if( pReader->nData==0 ){ + dataBufferReset(&pReader->term); + }else{ + int n, nPrefix, nSuffix; + + n = getVarint32(pReader->pData, &nPrefix); + n += getVarint32(pReader->pData+n, &nSuffix); + + /* Truncate the current term and append suffix data. */ + pReader->term.nData = nPrefix; + dataBufferAppend(&pReader->term, pReader->pData+n, nSuffix); + + assert( n+nSuffix<=pReader->nData ); + pReader->pData += n+nSuffix; + pReader->nData -= n+nSuffix; + } + pReader->iBlockid++; +} + +/* Compare the current term to pTerm[nTerm], returning strcmp-style +** results. If isPrefix, equality means equal through nTerm bytes. +*/ +static int interiorReaderTermCmp(InteriorReader *pReader, + const char *pTerm, int nTerm, int isPrefix){ + const char *pReaderTerm = interiorReaderTerm(pReader); + int nReaderTerm = interiorReaderTermBytes(pReader); + int c, n = nReaderTerm0 ) return -1; + if( nTerm>0 ) return 1; + return 0; + } + + c = memcmp(pReaderTerm, pTerm, n); + if( c!=0 ) return c; + if( isPrefix && n==nTerm ) return 0; + return nReaderTerm - nTerm; +} + +/****************************************************************/ +/* LeafWriter is used to collect terms and associated doclist data +** into leaf blocks in %_segments (see top of file for format info). +** Expected usage is: +** +** LeafWriter writer; +** leafWriterInit(0, 0, &writer); +** while( sorted_terms_left_to_process ){ +** // data is doclist data for that term. +** rc = leafWriterStep(v, &writer, pTerm, nTerm, pData, nData); +** if( rc!=SQLITE_OK ) goto err; +** } +** rc = leafWriterFinalize(v, &writer); +**err: +** leafWriterDestroy(&writer); +** return rc; +** +** leafWriterStep() may write a collected leaf out to %_segments. +** leafWriterFinalize() finishes writing any buffered data and stores +** a root node in %_segdir. leafWriterDestroy() frees all buffers and +** InteriorWriters allocated as part of writing this segment. +** +** TODO(shess) Document leafWriterStepMerge(). +*/ + +/* Put terms with data this big in their own block. */ +#define STANDALONE_MIN 1024 + +/* Keep leaf blocks below this size. */ +#define LEAF_MAX 2048 + +typedef struct LeafWriter { + int iLevel; + int idx; + sqlite_int64 iStartBlockid; /* needed to create the root info */ + sqlite_int64 iEndBlockid; /* when we're done writing. 
*/ + + DataBuffer term; /* previous encoded term */ + DataBuffer data; /* encoding buffer */ + + /* bytes of first term in the current node which distinguishes that + ** term from the last term of the previous node. + */ + int nTermDistinct; + + InteriorWriter parentWriter; /* if we overflow */ + int has_parent; +} LeafWriter; + +static void leafWriterInit(int iLevel, int idx, LeafWriter *pWriter){ + CLEAR(pWriter); + pWriter->iLevel = iLevel; + pWriter->idx = idx; + + dataBufferInit(&pWriter->term, 32); + + /* Start out with a reasonably sized block, though it can grow. */ + dataBufferInit(&pWriter->data, LEAF_MAX); +} + +#ifndef NDEBUG +/* Verify that the data is readable as a leaf node. */ +static void leafNodeValidate(const char *pData, int nData){ + int n, iDummy; + + if( nData==0 ) return; + assert( nData>0 ); + assert( pData!=0 ); + assert( pData+nData>pData ); + + /* Must lead with a varint(0) */ + n = getVarint32(pData, &iDummy); + assert( iDummy==0 ); + assert( n>0 ); + assert( n0 ); + assert( iDummy>0 ); + assert( n+iDummy>0 ); + assert( n+iDummy0 ); + assert( iDummy>0 ); + assert( n+iDummy>0 ); + assert( n+iDummy<=nData ); + ASSERT_VALID_DOCLIST(DL_DEFAULT, pData+n, iDummy, NULL); + pData += n+iDummy; + nData -= n+iDummy; + + /* Verify that trailing terms and doclists also are readable. */ + while( nData!=0 ){ + n = getVarint32(pData, &iDummy); + assert( n>0 ); + assert( iDummy>=0 ); + assert( n0 ); + assert( iDummy>0 ); + assert( n+iDummy>0 ); + assert( n+iDummy0 ); + assert( iDummy>0 ); + assert( n+iDummy>0 ); + assert( n+iDummy<=nData ); + ASSERT_VALID_DOCLIST(DL_DEFAULT, pData+n, iDummy, NULL); + pData += n+iDummy; + nData -= n+iDummy; + } +} +#define ASSERT_VALID_LEAF_NODE(p, n) leafNodeValidate(p, n) +#else +#define ASSERT_VALID_LEAF_NODE(p, n) assert( 1 ) +#endif + +/* Flush the current leaf node to %_segments, and adding the resulting +** blockid and the starting term to the interior node which will +** contain it. +*/ +static int leafWriterInternalFlush(fulltext_vtab *v, LeafWriter *pWriter, + int iData, int nData){ + sqlite_int64 iBlockid = 0; + const char *pStartingTerm; + int nStartingTerm, rc, n; + + /* Must have the leading varint(0) flag, plus at least some + ** valid-looking data. + */ + assert( nData>2 ); + assert( iData>=0 ); + assert( iData+nData<=pWriter->data.nData ); + ASSERT_VALID_LEAF_NODE(pWriter->data.pData+iData, nData); + + rc = block_insert(v, pWriter->data.pData+iData, nData, &iBlockid); + if( rc!=SQLITE_OK ) return rc; + assert( iBlockid!=0 ); + + /* Reconstruct the first term in the leaf for purposes of building + ** the interior node. + */ + n = getVarint32(pWriter->data.pData+iData+1, &nStartingTerm); + pStartingTerm = pWriter->data.pData+iData+1+n; + assert( pWriter->data.nData>iData+1+n+nStartingTerm ); + assert( pWriter->nTermDistinct>0 ); + assert( pWriter->nTermDistinct<=nStartingTerm ); + nStartingTerm = pWriter->nTermDistinct; + + if( pWriter->has_parent ){ + interiorWriterAppend(&pWriter->parentWriter, + pStartingTerm, nStartingTerm, iBlockid); + }else{ + interiorWriterInit(1, pStartingTerm, nStartingTerm, iBlockid, + &pWriter->parentWriter); + pWriter->has_parent = 1; + } + + /* Track the span of this segment's leaf nodes. 
*/ + if( pWriter->iEndBlockid==0 ){ + pWriter->iEndBlockid = pWriter->iStartBlockid = iBlockid; + }else{ + pWriter->iEndBlockid++; + assert( iBlockid==pWriter->iEndBlockid ); + } + + return SQLITE_OK; +} +static int leafWriterFlush(fulltext_vtab *v, LeafWriter *pWriter){ + int rc = leafWriterInternalFlush(v, pWriter, 0, pWriter->data.nData); + if( rc!=SQLITE_OK ) return rc; + + /* Re-initialize the output buffer. */ + dataBufferReset(&pWriter->data); + + return SQLITE_OK; +} + +/* Fetch the root info for the segment. If the entire leaf fits +** within ROOT_MAX, then it will be returned directly, otherwise it +** will be flushed and the root info will be returned from the +** interior node. *piEndBlockid is set to the blockid of the last +** interior or leaf node written to disk (0 if none are written at +** all). +*/ +static int leafWriterRootInfo(fulltext_vtab *v, LeafWriter *pWriter, + char **ppRootInfo, int *pnRootInfo, + sqlite_int64 *piEndBlockid){ + /* we can fit the segment entirely inline */ + if( !pWriter->has_parent && pWriter->data.nDatadata.pData; + *pnRootInfo = pWriter->data.nData; + *piEndBlockid = 0; + return SQLITE_OK; + } + + /* Flush remaining leaf data. */ + if( pWriter->data.nData>0 ){ + int rc = leafWriterFlush(v, pWriter); + if( rc!=SQLITE_OK ) return rc; + } + + /* We must have flushed a leaf at some point. */ + assert( pWriter->has_parent ); + + /* Tenatively set the end leaf blockid as the end blockid. If the + ** interior node can be returned inline, this will be the final + ** blockid, otherwise it will be overwritten by + ** interiorWriterRootInfo(). + */ + *piEndBlockid = pWriter->iEndBlockid; + + return interiorWriterRootInfo(v, &pWriter->parentWriter, + ppRootInfo, pnRootInfo, piEndBlockid); +} + +/* Collect the rootInfo data and store it into the segment directory. +** This has the effect of flushing the segment's leaf data to +** %_segments, and also flushing any interior nodes to %_segments. +*/ +static int leafWriterFinalize(fulltext_vtab *v, LeafWriter *pWriter){ + sqlite_int64 iEndBlockid; + char *pRootInfo; + int rc, nRootInfo; + + rc = leafWriterRootInfo(v, pWriter, &pRootInfo, &nRootInfo, &iEndBlockid); + if( rc!=SQLITE_OK ) return rc; + + /* Don't bother storing an entirely empty segment. */ + if( iEndBlockid==0 && nRootInfo==0 ) return SQLITE_OK; + + return segdir_set(v, pWriter->iLevel, pWriter->idx, + pWriter->iStartBlockid, pWriter->iEndBlockid, + iEndBlockid, pRootInfo, nRootInfo); +} + +static void leafWriterDestroy(LeafWriter *pWriter){ + if( pWriter->has_parent ) interiorWriterDestroy(&pWriter->parentWriter); + dataBufferDestroy(&pWriter->term); + dataBufferDestroy(&pWriter->data); +} + +/* Encode a term into the leafWriter, delta-encoding as appropriate. +** Returns the length of the new term which distinguishes it from the +** previous term, which can be used to set nTermDistinct when a node +** boundary is crossed. +*/ +static int leafWriterEncodeTerm(LeafWriter *pWriter, + const char *pTerm, int nTerm){ + char c[VARINT_MAX+VARINT_MAX]; + int n, nPrefix = 0; + + assert( nTerm>0 ); + while( nPrefixterm.nData && + pTerm[nPrefix]==pWriter->term.pData[nPrefix] ){ + nPrefix++; + /* Failing this implies that the terms weren't in order. 
*/ + assert( nPrefixdata.nData==0 ){ + /* Encode the node header and leading term as: + ** varint(0) + ** varint(nTerm) + ** char pTerm[nTerm] + */ + n = putVarint(c, '\0'); + n += putVarint(c+n, nTerm); + dataBufferAppend2(&pWriter->data, c, n, pTerm, nTerm); + }else{ + /* Delta-encode the term as: + ** varint(nPrefix) + ** varint(nSuffix) + ** char pTermSuffix[nSuffix] + */ + n = putVarint(c, nPrefix); + n += putVarint(c+n, nTerm-nPrefix); + dataBufferAppend2(&pWriter->data, c, n, pTerm+nPrefix, nTerm-nPrefix); + } + dataBufferReplace(&pWriter->term, pTerm, nTerm); + + return nPrefix+1; +} + +/* Used to avoid a memmove when a large amount of doclist data is in +** the buffer. This constructs a node and term header before +** iDoclistData and flushes the resulting complete node using +** leafWriterInternalFlush(). +*/ +static int leafWriterInlineFlush(fulltext_vtab *v, LeafWriter *pWriter, + const char *pTerm, int nTerm, + int iDoclistData){ + char c[VARINT_MAX+VARINT_MAX]; + int iData, n = putVarint(c, 0); + n += putVarint(c+n, nTerm); + + /* There should always be room for the header. Even if pTerm shared + ** a substantial prefix with the previous term, the entire prefix + ** could be constructed from earlier data in the doclist, so there + ** should be room. + */ + assert( iDoclistData>=n+nTerm ); + + iData = iDoclistData-(n+nTerm); + memcpy(pWriter->data.pData+iData, c, n); + memcpy(pWriter->data.pData+iData+n, pTerm, nTerm); + + return leafWriterInternalFlush(v, pWriter, iData, pWriter->data.nData-iData); +} + +/* Push pTerm[nTerm] along with the doclist data to the leaf layer of +** %_segments. +*/ +static int leafWriterStepMerge(fulltext_vtab *v, LeafWriter *pWriter, + const char *pTerm, int nTerm, + DLReader *pReaders, int nReaders){ + char c[VARINT_MAX+VARINT_MAX]; + int iTermData = pWriter->data.nData, iDoclistData; + int i, nData, n, nActualData, nActual, rc, nTermDistinct; + + ASSERT_VALID_LEAF_NODE(pWriter->data.pData, pWriter->data.nData); + nTermDistinct = leafWriterEncodeTerm(pWriter, pTerm, nTerm); + + /* Remember nTermDistinct if opening a new node. */ + if( iTermData==0 ) pWriter->nTermDistinct = nTermDistinct; + + iDoclistData = pWriter->data.nData; + + /* Estimate the length of the merged doclist so we can leave space + ** to encode it. + */ + for(i=0, nData=0; idata, c, n); + + docListMerge(&pWriter->data, pReaders, nReaders); + ASSERT_VALID_DOCLIST(DL_DEFAULT, + pWriter->data.pData+iDoclistData+n, + pWriter->data.nData-iDoclistData-n, NULL); + + /* The actual amount of doclist data at this point could be smaller + ** than the length we encoded. Additionally, the space required to + ** encode this length could be smaller. For small doclists, this is + ** not a big deal, we can just use memmove() to adjust things. + */ + nActualData = pWriter->data.nData-(iDoclistData+n); + nActual = putVarint(c, nActualData); + assert( nActualData<=nData ); + assert( nActual<=n ); + + /* If the new doclist is big enough for force a standalone leaf + ** node, we can immediately flush it inline without doing the + ** memmove(). + */ + /* TODO(shess) This test matches leafWriterStep(), which does this + ** test before it knows the cost to varint-encode the term and + ** doclist lengths. At some point, change to + ** pWriter->data.nData-iTermData>STANDALONE_MIN. + */ + if( nTerm+nActualData>STANDALONE_MIN ){ + /* Push leaf node from before this term. 
*/ + if( iTermData>0 ){ + rc = leafWriterInternalFlush(v, pWriter, 0, iTermData); + if( rc!=SQLITE_OK ) return rc; + + pWriter->nTermDistinct = nTermDistinct; + } + + /* Fix the encoded doclist length. */ + iDoclistData += n - nActual; + memcpy(pWriter->data.pData+iDoclistData, c, nActual); + + /* Push the standalone leaf node. */ + rc = leafWriterInlineFlush(v, pWriter, pTerm, nTerm, iDoclistData); + if( rc!=SQLITE_OK ) return rc; + + /* Leave the node empty. */ + dataBufferReset(&pWriter->data); + + return rc; + } + + /* At this point, we know that the doclist was small, so do the + ** memmove if indicated. + */ + if( nActualdata.pData+iDoclistData+nActual, + pWriter->data.pData+iDoclistData+n, + pWriter->data.nData-(iDoclistData+n)); + pWriter->data.nData -= n-nActual; + } + + /* Replace written length with actual length. */ + memcpy(pWriter->data.pData+iDoclistData, c, nActual); + + /* If the node is too large, break things up. */ + /* TODO(shess) This test matches leafWriterStep(), which does this + ** test before it knows the cost to varint-encode the term and + ** doclist lengths. At some point, change to + ** pWriter->data.nData>LEAF_MAX. + */ + if( iTermData+nTerm+nActualData>LEAF_MAX ){ + /* Flush out the leading data as a node */ + rc = leafWriterInternalFlush(v, pWriter, 0, iTermData); + if( rc!=SQLITE_OK ) return rc; + + pWriter->nTermDistinct = nTermDistinct; + + /* Rebuild header using the current term */ + n = putVarint(pWriter->data.pData, 0); + n += putVarint(pWriter->data.pData+n, nTerm); + memcpy(pWriter->data.pData+n, pTerm, nTerm); + n += nTerm; + + /* There should always be room, because the previous encoding + ** included all data necessary to construct the term. + */ + assert( ndata.nData-iDoclistDatadata.pData+n, + pWriter->data.pData+iDoclistData, + pWriter->data.nData-iDoclistData); + pWriter->data.nData -= iDoclistData-n; + } + ASSERT_VALID_LEAF_NODE(pWriter->data.pData, pWriter->data.nData); + + return SQLITE_OK; +} + +/* Push pTerm[nTerm] along with the doclist data to the leaf layer of +** %_segments. +*/ +/* TODO(shess) Revise writeZeroSegment() so that doclists are +** constructed directly in pWriter->data. +*/ +static int leafWriterStep(fulltext_vtab *v, LeafWriter *pWriter, + const char *pTerm, int nTerm, + const char *pData, int nData){ + int rc; + DLReader reader; + + dlrInit(&reader, DL_DEFAULT, pData, nData); + rc = leafWriterStepMerge(v, pWriter, pTerm, nTerm, &reader, 1); + dlrDestroy(&reader); + + return rc; +} + + +/****************************************************************/ +/* LeafReader is used to iterate over an individual leaf node. */ +typedef struct LeafReader { + DataBuffer term; /* copy of current term. */ + + const char *pData; /* data for current term. */ + int nData; +} LeafReader; + +static void leafReaderDestroy(LeafReader *pReader){ + dataBufferDestroy(&pReader->term); + SCRAMBLE(pReader); +} + +static int leafReaderAtEnd(LeafReader *pReader){ + return pReader->nData<=0; +} + +/* Access the current term. */ +static int leafReaderTermBytes(LeafReader *pReader){ + return pReader->term.nData; +} +static const char *leafReaderTerm(LeafReader *pReader){ + assert( pReader->term.nData>0 ); + return pReader->term.pData; +} + +/* Access the doclist data for the current term. 
*/ +static int leafReaderDataBytes(LeafReader *pReader){ + int nData; + assert( pReader->term.nData>0 ); + getVarint32(pReader->pData, &nData); + return nData; +} +static const char *leafReaderData(LeafReader *pReader){ + int n, nData; + assert( pReader->term.nData>0 ); + n = getVarint32(pReader->pData, &nData); + return pReader->pData+n; +} + +static void leafReaderInit(const char *pData, int nData, + LeafReader *pReader){ + int nTerm, n; + + assert( nData>0 ); + assert( pData[0]=='\0' ); + + CLEAR(pReader); + + /* Read the first term, skipping the header byte. */ + n = getVarint32(pData+1, &nTerm); + dataBufferInit(&pReader->term, nTerm); + dataBufferReplace(&pReader->term, pData+1+n, nTerm); + + /* Position after the first term. */ + assert( 1+n+nTerm<=nData ); + pReader->pData = pData+1+n+nTerm; + pReader->nData = nData-1-n-nTerm; +} + +/* Step the reader forward to the next term. */ +static void leafReaderStep(LeafReader *pReader){ + int n, nData, nPrefix, nSuffix; + assert( !leafReaderAtEnd(pReader) ); + + /* Skip previous entry's data block. */ + n = getVarint32(pReader->pData, &nData); + assert( n+nData<=pReader->nData ); + pReader->pData += n+nData; + pReader->nData -= n+nData; + + if( !leafReaderAtEnd(pReader) ){ + /* Construct the new term using a prefix from the old term plus a + ** suffix from the leaf data. + */ + n = getVarint32(pReader->pData, &nPrefix); + n += getVarint32(pReader->pData+n, &nSuffix); + assert( n+nSuffix<=pReader->nData ); + pReader->term.nData = nPrefix; + dataBufferAppend(&pReader->term, pReader->pData+n, nSuffix); + + pReader->pData += n+nSuffix; + pReader->nData -= n+nSuffix; + } +} + +/* strcmp-style comparison of pReader's current term against pTerm. +** If isPrefix, equality means equal through nTerm bytes. +*/ +static int leafReaderTermCmp(LeafReader *pReader, + const char *pTerm, int nTerm, int isPrefix){ + int c, n = pReader->term.nData<nTerm ? pReader->term.nData : nTerm; + if( n==0 ){ + if( pReader->term.nData>0 ) return -1; + if( nTerm>0 ) return 1; + return 0; + } + + c = memcmp(pReader->term.pData, pTerm, n); + if( c!=0 ) return c; + if( isPrefix && n==nTerm ) return 0; + return pReader->term.nData - nTerm; +} + + +/****************************************************************/ +/* LeavesReader wraps LeafReader to allow iterating over the entire +** leaf layer of the tree. +*/ +typedef struct LeavesReader { + int idx; /* Index within the segment. */ + + sqlite3_stmt *pStmt; /* Statement we're streaming leaves from. */ + int eof; /* we've seen SQLITE_DONE from pStmt. */ + + LeafReader leafReader; /* reader for the current leaf. */ + DataBuffer rootData; /* root data for inline. */ +} LeavesReader; + +/* Access the current term. */ +static int leavesReaderTermBytes(LeavesReader *pReader){ + assert( !pReader->eof ); + return leafReaderTermBytes(&pReader->leafReader); +} +static const char *leavesReaderTerm(LeavesReader *pReader){ + assert( !pReader->eof ); + return leafReaderTerm(&pReader->leafReader); +} + +/* Access the doclist data for the current term. */ +static int leavesReaderDataBytes(LeavesReader *pReader){ + assert( !pReader->eof ); + return leafReaderDataBytes(&pReader->leafReader); +} +static const char *leavesReaderData(LeavesReader *pReader){ + assert( !pReader->eof ); + return leafReaderData(&pReader->leafReader); +} + +static int leavesReaderAtEnd(LeavesReader *pReader){ + return pReader->eof; +} + +/* loadSegmentLeaves() may not read all the way to SQLITE_DONE, thus +** leaving the statement handle open, which locks the table.
+*/ +/* TODO(shess) This "solution" is not satisfactory. Really, there +** should be check-in function for all statement handles which +** arranges to call sqlite3_reset(). This most likely will require +** modification to control flow all over the place, though, so for now +** just punt. +** +** Note the the current system assumes that segment merges will run to +** completion, which is why this particular probably hasn't arisen in +** this case. Probably a brittle assumption. +*/ +static int leavesReaderReset(LeavesReader *pReader){ + return sqlite3_reset(pReader->pStmt); +} + +static void leavesReaderDestroy(LeavesReader *pReader){ + leafReaderDestroy(&pReader->leafReader); + dataBufferDestroy(&pReader->rootData); + SCRAMBLE(pReader); +} + +/* Initialize pReader with the given root data (if iStartBlockid==0 +** the leaf data was entirely contained in the root), or from the +** stream of blocks between iStartBlockid and iEndBlockid, inclusive. +*/ +static int leavesReaderInit(fulltext_vtab *v, + int idx, + sqlite_int64 iStartBlockid, + sqlite_int64 iEndBlockid, + const char *pRootData, int nRootData, + LeavesReader *pReader){ + CLEAR(pReader); + pReader->idx = idx; + + dataBufferInit(&pReader->rootData, 0); + if( iStartBlockid==0 ){ + /* Entire leaf level fit in root data. */ + dataBufferReplace(&pReader->rootData, pRootData, nRootData); + leafReaderInit(pReader->rootData.pData, pReader->rootData.nData, + &pReader->leafReader); + }else{ + sqlite3_stmt *s; + int rc = sql_get_leaf_statement(v, idx, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iStartBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 2, iEndBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ){ + pReader->eof = 1; + return SQLITE_OK; + } + if( rc!=SQLITE_ROW ) return rc; + + pReader->pStmt = s; + leafReaderInit(sqlite3_column_blob(pReader->pStmt, 0), + sqlite3_column_bytes(pReader->pStmt, 0), + &pReader->leafReader); + } + return SQLITE_OK; +} + +/* Step the current leaf forward to the next term. If we reach the +** end of the current leaf, step forward to the next leaf block. +*/ +static int leavesReaderStep(fulltext_vtab *v, LeavesReader *pReader){ + assert( !leavesReaderAtEnd(pReader) ); + leafReaderStep(&pReader->leafReader); + + if( leafReaderAtEnd(&pReader->leafReader) ){ + int rc; + if( pReader->rootData.pData ){ + pReader->eof = 1; + return SQLITE_OK; + } + rc = sqlite3_step(pReader->pStmt); + if( rc!=SQLITE_ROW ){ + pReader->eof = 1; + return rc==SQLITE_DONE ? SQLITE_OK : rc; + } + leafReaderDestroy(&pReader->leafReader); + leafReaderInit(sqlite3_column_blob(pReader->pStmt, 0), + sqlite3_column_bytes(pReader->pStmt, 0), + &pReader->leafReader); + } + return SQLITE_OK; +} + +/* Order LeavesReaders by their term, ignoring idx. Readers at eof +** always sort to the end. +*/ +static int leavesReaderTermCmp(LeavesReader *lr1, LeavesReader *lr2){ + if( leavesReaderAtEnd(lr1) ){ + if( leavesReaderAtEnd(lr2) ) return 0; + return 1; + } + if( leavesReaderAtEnd(lr2) ) return -1; + + return leafReaderTermCmp(&lr1->leafReader, + leavesReaderTerm(lr2), leavesReaderTermBytes(lr2), + 0); +} + +/* Similar to leavesReaderTermCmp(), with additional ordering by idx +** so that older segments sort before newer segments. +*/ +static int leavesReaderCmp(LeavesReader *lr1, LeavesReader *lr2){ + int c = leavesReaderTermCmp(lr1, lr2); + if( c!=0 ) return c; + return lr1->idx-lr2->idx; +} + +/* Assume that pLr[1]..pLr[nLr] are sorted. 
Bubble pLr[0] into its +** sorted position. +*/ +static void leavesReaderReorder(LeavesReader *pLr, int nLr){ + while( nLr>1 && leavesReaderCmp(pLr, pLr+1)>0 ){ + LeavesReader tmp = pLr[0]; + pLr[0] = pLr[1]; + pLr[1] = tmp; + nLr--; + pLr++; + } +} + +/* Initializes pReaders with the segments from level iLevel, returning +** the number of segments in *piReaders. Leaves pReaders in sorted +** order. +*/ +static int leavesReadersInit(fulltext_vtab *v, int iLevel, + LeavesReader *pReaders, int *piReaders){ + sqlite3_stmt *s; + int i, rc = sql_get_statement(v, SEGDIR_SELECT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + i = 0; + while( (rc = sqlite3_step(s))==SQLITE_ROW ){ + sqlite_int64 iStart = sqlite3_column_int64(s, 0); + sqlite_int64 iEnd = sqlite3_column_int64(s, 1); + const char *pRootData = sqlite3_column_blob(s, 2); + int nRootData = sqlite3_column_bytes(s, 2); + + assert( i0 ){ + leavesReaderDestroy(&pReaders[i]); + } + return rc; + } + + *piReaders = i; + + /* Leave our results sorted by term, then age. */ + while( i-- ){ + leavesReaderReorder(pReaders+i, *piReaders-i); + } + return SQLITE_OK; +} + +/* Merge doclists from pReaders[nReaders] into a single doclist, which +** is written to pWriter. Assumes pReaders is ordered oldest to +** newest. +*/ +/* TODO(shess) Consider putting this inline in segmentMerge(). */ +static int leavesReadersMerge(fulltext_vtab *v, + LeavesReader *pReaders, int nReaders, + LeafWriter *pWriter){ + DLReader dlReaders[MERGE_COUNT]; + const char *pTerm = leavesReaderTerm(pReaders); + int i, nTerm = leavesReaderTermBytes(pReaders); + + assert( nReaders<=MERGE_COUNT ); + + for(i=0; i0 ){ + rc = leavesReaderStep(v, lrs+i); + if( rc!=SQLITE_OK ) goto err; + + /* Reorder by term, then by age. */ + leavesReaderReorder(lrs+i, MERGE_COUNT-i); + } + } + + for(i=0; i0 ); + + /* Process while the prefix matches. */ + while( !leavesReaderAtEnd(pReader) ){ + /* TODO(shess) Really want leavesReaderTermCmp(), but that name is + ** already taken to compare the terms of two LeavesReaders. Think + ** on a better name. [Meanwhile, break encapsulation rather than + ** use a confusing name.] + */ + int rc; + int c = leafReaderTermCmp(&pReader->leafReader, pTerm, nTerm, isPrefix); + if( c==0 ){ + const char *pData = leavesReaderData(pReader); + int nData = leavesReaderDataBytes(pReader); + if( out->nData==0 ){ + dataBufferReplace(out, pData, nData); + }else{ + DataBuffer result; + dataBufferInit(&result, out->nData+nData); + docListUnion(out->pData, out->nData, pData, nData, &result); + dataBufferDestroy(out); + *out = result; + /* TODO(shess) Rather than destroy out, we could retain it for + ** later reuse. + */ + } + } + if( c>0 ) break; /* Past any possible matches. */ + + rc = leavesReaderStep(v, pReader); + if( rc!=SQLITE_OK ) return rc; + } + return SQLITE_OK; +} + +/* Call loadSegmentLeavesInt() with pData/nData as input. 
*/ +static int loadSegmentLeaf(fulltext_vtab *v, const char *pData, int nData, + const char *pTerm, int nTerm, int isPrefix, + DataBuffer *out){ + LeavesReader reader; + int rc; + + assert( nData>1 ); + assert( *pData=='\0' ); + rc = leavesReaderInit(v, 0, 0, 0, pData, nData, &reader); + if( rc!=SQLITE_OK ) return rc; + + rc = loadSegmentLeavesInt(v, &reader, pTerm, nTerm, isPrefix, out); + leavesReaderReset(&reader); + leavesReaderDestroy(&reader); + return rc; +} + +/* Call loadSegmentLeavesInt() with the leaf nodes from iStartLeaf to +** iEndLeaf (inclusive) as input, and merge the resulting doclist into +** out. +*/ +static int loadSegmentLeaves(fulltext_vtab *v, + sqlite_int64 iStartLeaf, sqlite_int64 iEndLeaf, + const char *pTerm, int nTerm, int isPrefix, + DataBuffer *out){ + int rc; + LeavesReader reader; + + assert( iStartLeaf<=iEndLeaf ); + rc = leavesReaderInit(v, 0, iStartLeaf, iEndLeaf, NULL, 0, &reader); + if( rc!=SQLITE_OK ) return rc; + + rc = loadSegmentLeavesInt(v, &reader, pTerm, nTerm, isPrefix, out); + leavesReaderReset(&reader); + leavesReaderDestroy(&reader); + return rc; +} + +/* Taking pData/nData as an interior node, find the sequence of child +** nodes which could include pTerm/nTerm/isPrefix. Note that the +** interior node terms logically come between the blocks, so there is +** one more blockid than there are terms (that block contains terms >= +** the last interior-node term). +*/ +/* TODO(shess) The calling code may already know that the end child is +** not worth calculating, because the end may be in a later sibling +** node. Consider whether breaking symmetry is worthwhile. I suspect +** it's not worthwhile. +*/ +static void getChildrenContaining(const char *pData, int nData, + const char *pTerm, int nTerm, int isPrefix, + sqlite_int64 *piStartChild, + sqlite_int64 *piEndChild){ + InteriorReader reader; + + assert( nData>1 ); + assert( *pData!='\0' ); + interiorReaderInit(pData, nData, &reader); + + /* Scan for the first child which could contain pTerm/nTerm. */ + while( !interiorReaderAtEnd(&reader) ){ + if( interiorReaderTermCmp(&reader, pTerm, nTerm, 0)>0 ) break; + interiorReaderStep(&reader); + } + *piStartChild = interiorReaderCurrentBlockid(&reader); + + /* Keep scanning to find a term greater than our term, using prefix + ** comparison if indicated. If isPrefix is false, this will be the + ** same blockid as the starting block. + */ + while( !interiorReaderAtEnd(&reader) ){ + if( interiorReaderTermCmp(&reader, pTerm, nTerm, isPrefix)>0 ) break; + interiorReaderStep(&reader); + } + *piEndChild = interiorReaderCurrentBlockid(&reader); + + interiorReaderDestroy(&reader); + + /* Children must ascend, and if !prefix, both must be the same. */ + assert( *piEndChild>=*piStartChild ); + assert( isPrefix || *piStartChild==*piEndChild ); +} + +/* Read block at iBlockid and pass it with other params to +** getChildrenContaining(). +*/ +static int loadAndGetChildrenContaining( + fulltext_vtab *v, + sqlite_int64 iBlockid, + const char *pTerm, int nTerm, int isPrefix, + sqlite_int64 *piStartChild, sqlite_int64 *piEndChild +){ + sqlite3_stmt *s = NULL; + int rc; + + assert( iBlockid!=0 ); + assert( pTerm!=NULL ); + assert( nTerm!=0 ); /* TODO(shess) Why not allow this? 
*/ + assert( piStartChild!=NULL ); + assert( piEndChild!=NULL ); + + rc = sql_get_statement(v, BLOCK_SELECT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ) return SQLITE_ERROR; + if( rc!=SQLITE_ROW ) return rc; + + getChildrenContaining(sqlite3_column_blob(s, 0), sqlite3_column_bytes(s, 0), + pTerm, nTerm, isPrefix, piStartChild, piEndChild); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain + * locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + if( rc!=SQLITE_DONE ) return rc; + + return SQLITE_OK; +} + +/* Traverse the tree represented by pData[nData] looking for +** pTerm[nTerm], placing its doclist into *out. This is internal to +** loadSegment() to make error-handling cleaner. +*/ +static int loadSegmentInt(fulltext_vtab *v, const char *pData, int nData, + sqlite_int64 iLeavesEnd, + const char *pTerm, int nTerm, int isPrefix, + DataBuffer *out){ + /* Special case where root is a leaf. */ + if( *pData=='\0' ){ + return loadSegmentLeaf(v, pData, nData, pTerm, nTerm, isPrefix, out); + }else{ + int rc; + sqlite_int64 iStartChild, iEndChild; + + /* Process pData as an interior node, then loop down the tree + ** until we find the set of leaf nodes to scan for the term. + */ + getChildrenContaining(pData, nData, pTerm, nTerm, isPrefix, + &iStartChild, &iEndChild); + while( iStartChild>iLeavesEnd ){ + sqlite_int64 iNextStart, iNextEnd; + rc = loadAndGetChildrenContaining(v, iStartChild, pTerm, nTerm, isPrefix, + &iNextStart, &iNextEnd); + if( rc!=SQLITE_OK ) return rc; + + /* If we've branched, follow the end branch, too. */ + if( iStartChild!=iEndChild ){ + sqlite_int64 iDummy; + rc = loadAndGetChildrenContaining(v, iEndChild, pTerm, nTerm, isPrefix, + &iDummy, &iNextEnd); + if( rc!=SQLITE_OK ) return rc; + } + + assert( iNextStart<=iNextEnd ); + iStartChild = iNextStart; + iEndChild = iNextEnd; + } + assert( iStartChild<=iLeavesEnd ); + assert( iEndChild<=iLeavesEnd ); + + /* Scan through the leaf segments for doclists. */ + return loadSegmentLeaves(v, iStartChild, iEndChild, + pTerm, nTerm, isPrefix, out); + } +} + +/* Call loadSegmentInt() to collect the doclist for pTerm/nTerm, then +** merge its doclist over *out (any duplicate doclists read from the +** segment rooted at pData will overwrite those in *out). +*/ +/* TODO(shess) Consider changing this to determine the depth of the +** leaves using either the first characters of interior nodes (when +** ==1, we're one level above the leaves), or the first character of +** the root (which will describe the height of the tree directly). +** Either feels somewhat tricky to me. +*/ +/* TODO(shess) The current merge is likely to be slow for large +** doclists (though it should process from newest/smallest to +** oldest/largest, so it may not be that bad). It might be useful to +** modify things to allow for N-way merging. This could either be +** within a segment, with pairwise merges across segments, or across +** all segments at once. +*/ +static int loadSegment(fulltext_vtab *v, const char *pData, int nData, + sqlite_int64 iLeavesEnd, + const char *pTerm, int nTerm, int isPrefix, + DataBuffer *out){ + DataBuffer result; + int rc; + + assert( nData>1 ); + + /* This code should never be called with buffered updates. 
*/ + assert( v->nPendingData<0 ); + + dataBufferInit(&result, 0); + rc = loadSegmentInt(v, pData, nData, iLeavesEnd, + pTerm, nTerm, isPrefix, &result); + if( rc==SQLITE_OK && result.nData>0 ){ + if( out->nData==0 ){ + DataBuffer tmp = *out; + *out = result; + result = tmp; + }else{ + DataBuffer merged; + DLReader readers[2]; + + dlrInit(&readers[0], DL_DEFAULT, out->pData, out->nData); + dlrInit(&readers[1], DL_DEFAULT, result.pData, result.nData); + dataBufferInit(&merged, out->nData+result.nData); + docListMerge(&merged, readers, 2); + dataBufferDestroy(out); + *out = merged; + dlrDestroy(&readers[0]); + dlrDestroy(&readers[1]); + } + } + dataBufferDestroy(&result); + return rc; +} + +/* Scan the database and merge together the posting lists for the term +** into *out. +*/ +static int termSelect(fulltext_vtab *v, int iColumn, + const char *pTerm, int nTerm, int isPrefix, + DocListType iType, DataBuffer *out){ + DataBuffer doclist; + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_SELECT_ALL_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + /* This code should never be called with buffered updates. */ + assert( v->nPendingData<0 ); + + dataBufferInit(&doclist, 0); + + /* Traverse the segments from oldest to newest so that newer doclist + ** elements for given docids overwrite older elements. + */ + while( (rc = sqlite3_step(s))==SQLITE_ROW ){ + const char *pData = sqlite3_column_blob(s, 0); + const int nData = sqlite3_column_bytes(s, 0); + const sqlite_int64 iLeavesEnd = sqlite3_column_int64(s, 1); + rc = loadSegment(v, pData, nData, iLeavesEnd, pTerm, nTerm, isPrefix, + &doclist); + if( rc!=SQLITE_OK ) goto err; + } + if( rc==SQLITE_DONE ){ + if( doclist.nData!=0 ){ + /* TODO(shess) The old term_select_all() code applied the column + ** restrict as we merged segments, leading to smaller buffers. + ** This is probably worthwhile to bring back, once the new storage + ** system is checked in. + */ + if( iColumn==v->nColumn) iColumn = -1; + docListTrim(DL_DEFAULT, doclist.pData, doclist.nData, + iColumn, iType, out); + } + rc = SQLITE_OK; + } + + err: + dataBufferDestroy(&doclist); + return rc; +} + +/****************************************************************/ +/* Used to hold hashtable data for sorting. */ +typedef struct TermData { + const char *pTerm; + int nTerm; + DLCollector *pCollector; +} TermData; + +/* Orders TermData elements in strcmp fashion ( <0 for less-than, 0 +** for equal, >0 for greater-than). +*/ +static int termDataCmp(const void *av, const void *bv){ + const TermData *a = (const TermData *)av; + const TermData *b = (const TermData *)bv; + int n = a->nTerm<b->nTerm ? a->nTerm : b->nTerm; + int c = memcmp(a->pTerm, b->pTerm, n); + if( c!=0 ) return c; + return a->nTerm-b->nTerm; +} + +/* Order pTerms data by term, then write a new level 0 segment using +** LeafWriter. +*/ +static int writeZeroSegment(fulltext_vtab *v, fts2Hash *pTerms){ + fts2HashElem *e; + int idx, rc, i, n; + TermData *pData; + LeafWriter writer; + DataBuffer dl; + + /* Determine the next index at level 0, merging as necessary. */ + rc = segdirNextIndex(v, 0, &idx); + if( rc!=SQLITE_OK ) return rc; + + n = fts2HashCount(pTerms); + pData = malloc(n*sizeof(TermData)); + + for(i = 0, e = fts2HashFirst(pTerms); e; i++, e = fts2HashNext(e)){ + assert( i<n ); + pData[i].pTerm = fts2HashKey(e); + pData[i].nTerm = fts2HashKeysize(e); + pData[i].pCollector = fts2HashData(e); + } + assert( i==n ); + + if( n>1 ) qsort(pData, n, sizeof(*pData), termDataCmp); + + /* TODO(shess) Refactor so that we can write directly to the segment + ** DataBuffer, as happens for segment merges.
+ */ + leafWriterInit(0, idx, &writer); + dataBufferInit(&dl, 0); + for(i=0; inPendingData>=0 ){ + fts2HashElem *e; + for(e=fts2HashFirst(&v->pendingTerms); e; e=fts2HashNext(e)){ + dlcDelete(fts2HashData(e)); + } + fts2HashClear(&v->pendingTerms); + v->nPendingData = -1; + } + return SQLITE_OK; +} + +/* If pendingTerms has data, flush it to a level-zero segment, and +** free it. +*/ +static int flushPendingTerms(fulltext_vtab *v){ + if( v->nPendingData>=0 ){ + int rc = writeZeroSegment(v, &v->pendingTerms); + if( rc==SQLITE_OK ) clearPendingTerms(v); + return rc; + } + return SQLITE_OK; +} + +/* If pendingTerms is "too big", or docid is out of order, flush it. +** Regardless, be certain that pendingTerms is initialized for use. +*/ +static int initPendingTerms(fulltext_vtab *v, sqlite_int64 iDocid){ + /* TODO(shess) Explore whether partially flushing the buffer on + ** forced-flush would provide better performance. I suspect that if + ** we ordered the doclists by size and flushed the largest until the + ** buffer was half empty, that would let the less frequent terms + ** generate longer doclists. + */ + if( iDocid<=v->iPrevDocid || v->nPendingData>kPendingThreshold ){ + int rc = flushPendingTerms(v); + if( rc!=SQLITE_OK ) return rc; + } + if( v->nPendingData<0 ){ + fts2HashInit(&v->pendingTerms, FTS2_HASH_STRING, 1); + v->nPendingData = 0; + } + v->iPrevDocid = iDocid; + return SQLITE_OK; +} + +/* This function implements the xUpdate callback; it's the top-level entry + * point for inserting, deleting or updating a row in a full-text table. */ +static int fulltextUpdate(sqlite3_vtab *pVtab, int nArg, sqlite3_value **ppArg, + sqlite_int64 *pRowid){ + fulltext_vtab *v = (fulltext_vtab *) pVtab; + int rc; + + TRACE(("FTS2 Update %p\n", pVtab)); + + if( nArg<2 ){ + rc = index_delete(v, sqlite3_value_int64(ppArg[0])); + } else if( sqlite3_value_type(ppArg[0]) != SQLITE_NULL ){ + /* An update: + * ppArg[0] = old rowid + * ppArg[1] = new rowid + * ppArg[2..2+v->nColumn-1] = values + * ppArg[2+v->nColumn] = value for magic column (we ignore this) + */ + sqlite_int64 rowid = sqlite3_value_int64(ppArg[0]); + if( sqlite3_value_type(ppArg[1]) != SQLITE_INTEGER || + sqlite3_value_int64(ppArg[1]) != rowid ){ + rc = SQLITE_ERROR; /* we don't allow changing the rowid */ + } else { + assert( nArg==2+v->nColumn+1); + rc = index_update(v, rowid, &ppArg[2]); + } + } else { + /* An insert: + * ppArg[1] = requested rowid + * ppArg[2..2+v->nColumn-1] = values + * ppArg[2+v->nColumn] = value for magic column (we ignore this) + */ + assert( nArg==2+v->nColumn+1); + rc = index_insert(v, ppArg[1], &ppArg[2], pRowid); + } + + return rc; +} + +static int fulltextSync(sqlite3_vtab *pVtab){ + TRACE(("FTS2 xSync()\n")); + return flushPendingTerms((fulltext_vtab *)pVtab); +} + +static int fulltextBegin(sqlite3_vtab *pVtab){ + fulltext_vtab *v = (fulltext_vtab *) pVtab; + TRACE(("FTS2 xBegin()\n")); + + /* Any buffered updates should have been cleared by the previous + ** transaction. + */ + assert( v->nPendingData<0 ); + return clearPendingTerms(v); +} + +static int fulltextCommit(sqlite3_vtab *pVtab){ + fulltext_vtab *v = (fulltext_vtab *) pVtab; + TRACE(("FTS2 xCommit()\n")); + + /* Buffered updates should have been cleared by fulltextSync(). 
*/ + assert( v->nPendingData<0 ); + return clearPendingTerms(v); +} + +static int fulltextRollback(sqlite3_vtab *pVtab){ + TRACE(("FTS2 xRollback()\n")); + return clearPendingTerms((fulltext_vtab *)pVtab); +} + +/* +** Implementation of the snippet() function for FTS2 +*/ +static void snippetFunc( + sqlite3_context *pContext, + int argc, + sqlite3_value **argv +){ + fulltext_cursor *pCursor; + if( argc<1 ) return; + if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || + sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ + sqlite3_result_error(pContext, "illegal first argument to html_snippet",-1); + }else{ + const char *zStart = "<b>"; + const char *zEnd = "</b>"; + const char *zEllipsis = "..."; + memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); + if( argc>=2 ){ + zStart = (const char*)sqlite3_value_text(argv[1]); + if( argc>=3 ){ + zEnd = (const char*)sqlite3_value_text(argv[2]); + if( argc>=4 ){ + zEllipsis = (const char*)sqlite3_value_text(argv[3]); + } + } + } + snippetAllOffsets(pCursor); + snippetText(pCursor, zStart, zEnd, zEllipsis); + sqlite3_result_text(pContext, pCursor->snippet.zSnippet, + pCursor->snippet.nSnippet, SQLITE_STATIC); + } +} + +/* +** Implementation of the offsets() function for FTS2 +*/ +static void snippetOffsetsFunc( + sqlite3_context *pContext, + int argc, + sqlite3_value **argv +){ + fulltext_cursor *pCursor; + if( argc<1 ) return; + if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || + sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ + sqlite3_result_error(pContext, "illegal first argument to offsets",-1); + }else{ + memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); + snippetAllOffsets(pCursor); + snippetOffsetText(&pCursor->snippet); + sqlite3_result_text(pContext, + pCursor->snippet.zOffset, pCursor->snippet.nOffset, + SQLITE_STATIC); + } +} + +/* +** This routine implements the xFindFunction method for the FTS2 +** virtual table. +*/ +static int fulltextFindFunction( + sqlite3_vtab *pVtab, + int nArg, + const char *zName, + void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), + void **ppArg +){ + if( strcmp(zName,"snippet")==0 ){ + *pxFunc = snippetFunc; + return 1; + }else if( strcmp(zName,"offsets")==0 ){ + *pxFunc = snippetOffsetsFunc; + return 1; + } + return 0; +} + +/* +** Rename an fts2 table.
+*/ +static int fulltextRename( + sqlite3_vtab *pVtab, + const char *zName +){ + fulltext_vtab *p = (fulltext_vtab *)pVtab; + int rc = SQLITE_NOMEM; + char *zSql = sqlite3_mprintf( + "ALTER TABLE %Q.'%q_content' RENAME TO '%q_content';" + "ALTER TABLE %Q.'%q_segments' RENAME TO '%q_segments';" + "ALTER TABLE %Q.'%q_segdir' RENAME TO '%q_segdir';" + , p->zDb, p->zName, zName + , p->zDb, p->zName, zName + , p->zDb, p->zName, zName + ); + if( zSql ){ + rc = sqlite3_exec(p->db, zSql, 0, 0, 0); + sqlite3_free(zSql); + } + return rc; +} + +static const sqlite3_module fts2Module = { + /* iVersion */ 0, + /* xCreate */ fulltextCreate, + /* xConnect */ fulltextConnect, + /* xBestIndex */ fulltextBestIndex, + /* xDisconnect */ fulltextDisconnect, + /* xDestroy */ fulltextDestroy, + /* xOpen */ fulltextOpen, + /* xClose */ fulltextClose, + /* xFilter */ fulltextFilter, + /* xNext */ fulltextNext, + /* xEof */ fulltextEof, + /* xColumn */ fulltextColumn, + /* xRowid */ fulltextRowid, + /* xUpdate */ fulltextUpdate, + /* xBegin */ fulltextBegin, + /* xSync */ fulltextSync, + /* xCommit */ fulltextCommit, + /* xRollback */ fulltextRollback, + /* xFindFunction */ fulltextFindFunction, + /* xRename */ fulltextRename, +}; + +static void hashDestroy(void *p){ + fts2Hash *pHash = (fts2Hash *)p; + sqlite3Fts2HashClear(pHash); + sqlite3_free(pHash); +} + +/* +** The fts2 built-in tokenizers - "simple" and "porter" - are implemented +** in files fts2_tokenizer1.c and fts2_porter.c respectively. The following +** two forward declarations are for functions declared in these files +** used to retrieve the respective implementations. +** +** Calling sqlite3Fts2SimpleTokenizerModule() sets the value pointed +** to by the argument to point a the "simple" tokenizer implementation. +** Function ...PorterTokenizerModule() sets *pModule to point to the +** porter tokenizer/stemmer implementation. +*/ +void sqlite3Fts2SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule); +void sqlite3Fts2PorterTokenizerModule(sqlite3_tokenizer_module const**ppModule); +void sqlite3Fts2IcuTokenizerModule(sqlite3_tokenizer_module const**ppModule); + +int sqlite3Fts2InitHashTable(sqlite3 *, fts2Hash *, const char *); + +/* +** Initialise the fts2 extension. If this extension is built as part +** of the sqlite library, then this function is called directly by +** SQLite. If fts2 is built as a dynamically loadable extension, this +** function is called by the sqlite3_extension_init() entry point. +*/ +int sqlite3Fts2Init(sqlite3 *db){ + int rc = SQLITE_OK; + fts2Hash *pHash = 0; + const sqlite3_tokenizer_module *pSimple = 0; + const sqlite3_tokenizer_module *pPorter = 0; + const sqlite3_tokenizer_module *pIcu = 0; + + sqlite3Fts2SimpleTokenizerModule(&pSimple); + sqlite3Fts2PorterTokenizerModule(&pPorter); +#ifdef SQLITE_ENABLE_ICU + sqlite3Fts2IcuTokenizerModule(&pIcu); +#endif + + /* Allocate and initialise the hash-table used to store tokenizers. */ + pHash = sqlite3_malloc(sizeof(fts2Hash)); + if( !pHash ){ + rc = SQLITE_NOMEM; + }else{ + sqlite3Fts2HashInit(pHash, FTS2_HASH_STRING, 1); + } + + /* Load the built-in tokenizers into the hash table */ + if( rc==SQLITE_OK ){ + if( sqlite3Fts2HashInsert(pHash, "simple", 7, (void *)pSimple) + || sqlite3Fts2HashInsert(pHash, "porter", 7, (void *)pPorter) + || (pIcu && sqlite3Fts2HashInsert(pHash, "icu", 4, (void *)pIcu)) + ){ + rc = SQLITE_NOMEM; + } + } + + /* Create the virtual table wrapper around the hash-table and overload + ** the two scalar functions. 
If this is successful, register the + ** module with sqlite. + */ + if( SQLITE_OK==rc + && SQLITE_OK==(rc = sqlite3Fts2InitHashTable(db, pHash, "fts2_tokenizer")) + && SQLITE_OK==(rc = sqlite3_overload_function(db, "snippet", -1)) + && SQLITE_OK==(rc = sqlite3_overload_function(db, "offsets", -1)) + ){ + return sqlite3_create_module_v2( + db, "fts2", &fts2Module, (void *)pHash, hashDestroy + ); + } + + /* An error has occured. Delete the hash table and return the error code. */ + assert( rc!=SQLITE_OK ); + if( pHash ){ + sqlite3Fts2HashClear(pHash); + sqlite3_free(pHash); + } + return rc; +} + +#if !SQLITE_CORE +int sqlite3_extension_init( + sqlite3 *db, + char **pzErrMsg, + const sqlite3_api_routines *pApi +){ + SQLITE_EXTENSION_INIT2(pApi) + return sqlite3Fts2Init(db); +} +#endif + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2.h b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2.h new file mode 100644 index 0000000000..4da4c3877b --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2.h @@ -0,0 +1,26 @@ +/* +** 2006 Oct 10 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This header file is used by programs that want to link against the +** FTS2 library. All it does is declare the sqlite3Fts2Init() interface. +*/ +#include "sqlite3.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +int sqlite3Fts2Init(sqlite3 *db); + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2_hash.c b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2_hash.c new file mode 100644 index 0000000000..fcd5cc2fd8 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2_hash.c @@ -0,0 +1,369 @@ +/* +** 2001 September 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This is the implementation of generic hash-tables used in SQLite. +** We've modified it slightly to serve as a standalone hash table +** implementation for the full-text indexing module. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS2 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS2 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS2 is defined). +*/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) + +#include +#include +#include + +#include "fts2_hash.h" + +static void *malloc_and_zero(int n){ + void *p = malloc(n); + if( p ){ + memset(p, 0, n); + } + return p; +} + +/* Turn bulk memory into a hash table object by initializing the +** fields of the Hash structure. +** +** "pNew" is a pointer to the hash table that is to be initialized. +** keyClass is one of the constants +** FTS2_HASH_BINARY or FTS2_HASH_STRING. The value of keyClass +** determines what kind of key the hash table will use. 
"copyKey" is +** true if the hash table should make its own private copy of keys and +** false if it should just use the supplied pointer. +*/ +void sqlite3Fts2HashInit(fts2Hash *pNew, int keyClass, int copyKey){ + assert( pNew!=0 ); + assert( keyClass>=FTS2_HASH_STRING && keyClass<=FTS2_HASH_BINARY ); + pNew->keyClass = keyClass; + pNew->copyKey = copyKey; + pNew->first = 0; + pNew->count = 0; + pNew->htsize = 0; + pNew->ht = 0; + pNew->xMalloc = malloc_and_zero; + pNew->xFree = free; +} + +/* Remove all entries from a hash table. Reclaim all memory. +** Call this routine to delete a hash table or to reset a hash table +** to the empty state. +*/ +void sqlite3Fts2HashClear(fts2Hash *pH){ + fts2HashElem *elem; /* For looping over all elements of the table */ + + assert( pH!=0 ); + elem = pH->first; + pH->first = 0; + if( pH->ht ) pH->xFree(pH->ht); + pH->ht = 0; + pH->htsize = 0; + while( elem ){ + fts2HashElem *next_elem = elem->next; + if( pH->copyKey && elem->pKey ){ + pH->xFree(elem->pKey); + } + pH->xFree(elem); + elem = next_elem; + } + pH->count = 0; +} + +/* +** Hash and comparison functions when the mode is FTS2_HASH_STRING +*/ +static int strHash(const void *pKey, int nKey){ + const char *z = (const char *)pKey; + int h = 0; + if( nKey<=0 ) nKey = (int) strlen(z); + while( nKey > 0 ){ + h = (h<<3) ^ h ^ *z++; + nKey--; + } + return h & 0x7fffffff; +} +static int strCompare(const void *pKey1, int n1, const void *pKey2, int n2){ + if( n1!=n2 ) return 1; + return strncmp((const char*)pKey1,(const char*)pKey2,n1); +} + +/* +** Hash and comparison functions when the mode is FTS2_HASH_BINARY +*/ +static int binHash(const void *pKey, int nKey){ + int h = 0; + const char *z = (const char *)pKey; + while( nKey-- > 0 ){ + h = (h<<3) ^ h ^ *(z++); + } + return h & 0x7fffffff; +} +static int binCompare(const void *pKey1, int n1, const void *pKey2, int n2){ + if( n1!=n2 ) return 1; + return memcmp(pKey1,pKey2,n1); +} + +/* +** Return a pointer to the appropriate hash function given the key class. +** +** The C syntax in this function definition may be unfamilar to some +** programmers, so we provide the following additional explanation: +** +** The name of the function is "hashFunction". The function takes a +** single parameter "keyClass". The return value of hashFunction() +** is a pointer to another function. Specifically, the return value +** of hashFunction() is a pointer to a function that takes two parameters +** with types "const void*" and "int" and returns an "int". +*/ +static int (*hashFunction(int keyClass))(const void*,int){ + if( keyClass==FTS2_HASH_STRING ){ + return &strHash; + }else{ + assert( keyClass==FTS2_HASH_BINARY ); + return &binHash; + } +} + +/* +** Return a pointer to the appropriate hash function given the key class. +** +** For help in interpreted the obscure C code in the function definition, +** see the header comment on the previous function. 
+*/ +static int (*compareFunction(int keyClass))(const void*,int,const void*,int){ + if( keyClass==FTS2_HASH_STRING ){ + return &strCompare; + }else{ + assert( keyClass==FTS2_HASH_BINARY ); + return &binCompare; + } +} + +/* Link an element into the hash table +*/ +static void insertElement( + fts2Hash *pH, /* The complete hash table */ + struct _fts2ht *pEntry, /* The entry into which pNew is inserted */ + fts2HashElem *pNew /* The element to be inserted */ +){ + fts2HashElem *pHead; /* First element already in pEntry */ + pHead = pEntry->chain; + if( pHead ){ + pNew->next = pHead; + pNew->prev = pHead->prev; + if( pHead->prev ){ pHead->prev->next = pNew; } + else { pH->first = pNew; } + pHead->prev = pNew; + }else{ + pNew->next = pH->first; + if( pH->first ){ pH->first->prev = pNew; } + pNew->prev = 0; + pH->first = pNew; + } + pEntry->count++; + pEntry->chain = pNew; +} + + +/* Resize the hash table so that it cantains "new_size" buckets. +** "new_size" must be a power of 2. The hash table might fail +** to resize if sqliteMalloc() fails. +*/ +static void rehash(fts2Hash *pH, int new_size){ + struct _fts2ht *new_ht; /* The new hash table */ + fts2HashElem *elem, *next_elem; /* For looping over existing elements */ + int (*xHash)(const void*,int); /* The hash function */ + + assert( (new_size & (new_size-1))==0 ); + new_ht = (struct _fts2ht *)pH->xMalloc( new_size*sizeof(struct _fts2ht) ); + if( new_ht==0 ) return; + if( pH->ht ) pH->xFree(pH->ht); + pH->ht = new_ht; + pH->htsize = new_size; + xHash = hashFunction(pH->keyClass); + for(elem=pH->first, pH->first=0; elem; elem = next_elem){ + int h = (*xHash)(elem->pKey, elem->nKey) & (new_size-1); + next_elem = elem->next; + insertElement(pH, &new_ht[h], elem); + } +} + +/* This function (for internal use only) locates an element in an +** hash table that matches the given key. The hash for this key has +** already been computed and is passed as the 4th parameter. +*/ +static fts2HashElem *findElementGivenHash( + const fts2Hash *pH, /* The pH to be searched */ + const void *pKey, /* The key we are searching for */ + int nKey, + int h /* The hash for this key. */ +){ + fts2HashElem *elem; /* Used to loop thru the element list */ + int count; /* Number of elements left to test */ + int (*xCompare)(const void*,int,const void*,int); /* comparison function */ + + if( pH->ht ){ + struct _fts2ht *pEntry = &pH->ht[h]; + elem = pEntry->chain; + count = pEntry->count; + xCompare = compareFunction(pH->keyClass); + while( count-- && elem ){ + if( (*xCompare)(elem->pKey,elem->nKey,pKey,nKey)==0 ){ + return elem; + } + elem = elem->next; + } + } + return 0; +} + +/* Remove a single entry from the hash table given a pointer to that +** element and a hash on the element's key. 
+*/ +static void removeElementGivenHash( + fts2Hash *pH, /* The pH containing "elem" */ + fts2HashElem* elem, /* The element to be removed from the pH */ + int h /* Hash value for the element */ +){ + struct _fts2ht *pEntry; + if( elem->prev ){ + elem->prev->next = elem->next; + }else{ + pH->first = elem->next; + } + if( elem->next ){ + elem->next->prev = elem->prev; + } + pEntry = &pH->ht[h]; + if( pEntry->chain==elem ){ + pEntry->chain = elem->next; + } + pEntry->count--; + if( pEntry->count<=0 ){ + pEntry->chain = 0; + } + if( pH->copyKey && elem->pKey ){ + pH->xFree(elem->pKey); + } + pH->xFree( elem ); + pH->count--; + if( pH->count<=0 ){ + assert( pH->first==0 ); + assert( pH->count==0 ); + fts2HashClear(pH); + } +} + +/* Attempt to locate an element of the hash table pH with a key +** that matches pKey,nKey. Return the data for this element if it is +** found, or NULL if there is no match. +*/ +void *sqlite3Fts2HashFind(const fts2Hash *pH, const void *pKey, int nKey){ + int h; /* A hash on key */ + fts2HashElem *elem; /* The element that matches key */ + int (*xHash)(const void*,int); /* The hash function */ + + if( pH==0 || pH->ht==0 ) return 0; + xHash = hashFunction(pH->keyClass); + assert( xHash!=0 ); + h = (*xHash)(pKey,nKey); + assert( (pH->htsize & (pH->htsize-1))==0 ); + elem = findElementGivenHash(pH,pKey,nKey, h & (pH->htsize-1)); + return elem ? elem->data : 0; +} + +/* Insert an element into the hash table pH. The key is pKey,nKey +** and the data is "data". +** +** If no element exists with a matching key, then a new +** element is created. A copy of the key is made if the copyKey +** flag is set. NULL is returned. +** +** If another element already exists with the same key, then the +** new data replaces the old data and the old data is returned. +** The key is not copied in this instance. If a malloc fails, then +** the new data is returned and the hash table is unchanged. +** +** If the "data" parameter to this function is NULL, then the +** element corresponding to "key" is removed from the hash table. 
+*/ +void *sqlite3Fts2HashInsert( + fts2Hash *pH, /* The hash table to insert into */ + const void *pKey, /* The key */ + int nKey, /* Number of bytes in the key */ + void *data /* The data */ +){ + int hraw; /* Raw hash value of the key */ + int h; /* the hash of the key modulo hash table size */ + fts2HashElem *elem; /* Used to loop thru the element list */ + fts2HashElem *new_elem; /* New element added to the pH */ + int (*xHash)(const void*,int); /* The hash function */ + + assert( pH!=0 ); + xHash = hashFunction(pH->keyClass); + assert( xHash!=0 ); + hraw = (*xHash)(pKey, nKey); + assert( (pH->htsize & (pH->htsize-1))==0 ); + h = hraw & (pH->htsize-1); + elem = findElementGivenHash(pH,pKey,nKey,h); + if( elem ){ + void *old_data = elem->data; + if( data==0 ){ + removeElementGivenHash(pH,elem,h); + }else{ + elem->data = data; + } + return old_data; + } + if( data==0 ) return 0; + new_elem = (fts2HashElem*)pH->xMalloc( sizeof(fts2HashElem) ); + if( new_elem==0 ) return data; + if( pH->copyKey && pKey!=0 ){ + new_elem->pKey = pH->xMalloc( nKey ); + if( new_elem->pKey==0 ){ + pH->xFree(new_elem); + return data; + } + memcpy((void*)new_elem->pKey, pKey, nKey); + }else{ + new_elem->pKey = (void*)pKey; + } + new_elem->nKey = nKey; + pH->count++; + if( pH->htsize==0 ){ + rehash(pH,8); + if( pH->htsize==0 ){ + pH->count = 0; + pH->xFree(new_elem); + return data; + } + } + if( pH->count > pH->htsize ){ + rehash(pH,pH->htsize*2); + } + assert( pH->htsize>0 ); + assert( (pH->htsize & (pH->htsize-1))==0 ); + h = hraw & (pH->htsize-1); + insertElement(pH, &pH->ht[h], new_elem); + new_elem->data = data; + return 0; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2_hash.h b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2_hash.h new file mode 100644 index 0000000000..97f35291cb --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2_hash.h @@ -0,0 +1,112 @@ +/* +** 2001 September 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This is the header file for the generic hash-table implemenation +** used in SQLite. We've modified it slightly to serve as a standalone +** hash table implementation for the full-text indexing module. +** +*/ +#ifndef _FTS2_HASH_H_ +#define _FTS2_HASH_H_ + +/* Forward declarations of structures. */ +typedef struct fts2Hash fts2Hash; +typedef struct fts2HashElem fts2HashElem; + +/* A complete hash table is an instance of the following structure. +** The internals of this structure are intended to be opaque -- client +** code should not attempt to access or modify the fields of this structure +** directly. Change this structure only by using the routines below. +** However, many of the "procedures" and "functions" for modifying and +** accessing this structure are really macros, so we can't really make +** this structure opaque. 
+*/ +struct fts2Hash { + char keyClass; /* HASH_INT, _POINTER, _STRING, _BINARY */ + char copyKey; /* True if copy of key made on insert */ + int count; /* Number of entries in this table */ + fts2HashElem *first; /* The first element of the array */ + void *(*xMalloc)(int); /* malloc() function to use */ + void (*xFree)(void *); /* free() function to use */ + int htsize; /* Number of buckets in the hash table */ + struct _fts2ht { /* the hash table */ + int count; /* Number of entries with this hash */ + fts2HashElem *chain; /* Pointer to first entry with this hash */ + } *ht; +}; + +/* Each element in the hash table is an instance of the following +** structure. All elements are stored on a single doubly-linked list. +** +** Again, this structure is intended to be opaque, but it can't really +** be opaque because it is used by macros. +*/ +struct fts2HashElem { + fts2HashElem *next, *prev; /* Next and previous elements in the table */ + void *data; /* Data associated with this element */ + void *pKey; int nKey; /* Key associated with this element */ +}; + +/* +** There are 2 different modes of operation for a hash table: +** +** FTS2_HASH_STRING pKey points to a string that is nKey bytes long +** (including the null-terminator, if any). Case +** is respected in comparisons. +** +** FTS2_HASH_BINARY pKey points to binary data nKey bytes long. +** memcmp() is used to compare keys. +** +** A copy of the key is made if the copyKey parameter to fts2HashInit is 1. +*/ +#define FTS2_HASH_STRING 1 +#define FTS2_HASH_BINARY 2 + +/* +** Access routines. To delete, insert a NULL pointer. +*/ +void sqlite3Fts2HashInit(fts2Hash*, int keytype, int copyKey); +void *sqlite3Fts2HashInsert(fts2Hash*, const void *pKey, int nKey, void *pData); +void *sqlite3Fts2HashFind(const fts2Hash*, const void *pKey, int nKey); +void sqlite3Fts2HashClear(fts2Hash*); + +/* +** Shorthand for the functions above +*/ +#define fts2HashInit sqlite3Fts2HashInit +#define fts2HashInsert sqlite3Fts2HashInsert +#define fts2HashFind sqlite3Fts2HashFind +#define fts2HashClear sqlite3Fts2HashClear + +/* +** Macros for looping over all elements of a hash table. The idiom is +** like this: +** +** fts2Hash h; +** fts2HashElem *p; +** ... +** for(p=fts2HashFirst(&h); p; p=fts2HashNext(p)){ +** SomeStructure *pData = fts2HashData(p); +** // do something with pData +** } +*/ +#define fts2HashFirst(H) ((H)->first) +#define fts2HashNext(E) ((E)->next) +#define fts2HashData(E) ((E)->data) +#define fts2HashKey(E) ((E)->pKey) +#define fts2HashKeysize(E) ((E)->nKey) + +/* +** Number of entries in a hash table +*/ +#define fts2HashCount(H) ((H)->count) + +#endif /* _FTS2_HASH_H_ */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2_icu.c b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2_icu.c new file mode 100644 index 0000000000..ed15f333d6 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2_icu.c @@ -0,0 +1,257 @@ +/* +** 2007 June 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file implements a tokenizer for fts2 based on the ICU library. 
+** +** $Id: fts2_icu.c,v 1.1 2007/06/22 15:21:16 danielk1977 Exp $ +*/ + +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) +#ifdef SQLITE_ENABLE_ICU + +#include +#include +#include "fts2_tokenizer.h" + +#include +#include +#include +#include + +typedef struct IcuTokenizer IcuTokenizer; +typedef struct IcuCursor IcuCursor; + +struct IcuTokenizer { + sqlite3_tokenizer base; + char *zLocale; +}; + +struct IcuCursor { + sqlite3_tokenizer_cursor base; + + UBreakIterator *pIter; /* ICU break-iterator object */ + int nChar; /* Number of UChar elements in pInput */ + UChar *aChar; /* Copy of input using utf-16 encoding */ + int *aOffset; /* Offsets of each character in utf-8 input */ + + int nBuffer; + char *zBuffer; + + int iToken; +}; + +/* +** Create a new tokenizer instance. +*/ +static int icuCreate( + int argc, /* Number of entries in argv[] */ + const char * const *argv, /* Tokenizer creation arguments */ + sqlite3_tokenizer **ppTokenizer /* OUT: Created tokenizer */ +){ + IcuTokenizer *p; + int n = 0; + + if( argc>0 ){ + n = strlen(argv[0])+1; + } + p = (IcuTokenizer *)sqlite3_malloc(sizeof(IcuTokenizer)+n); + if( !p ){ + return SQLITE_NOMEM; + } + memset(p, 0, sizeof(IcuTokenizer)); + + if( n ){ + p->zLocale = (char *)&p[1]; + memcpy(p->zLocale, argv[0], n); + } + + *ppTokenizer = (sqlite3_tokenizer *)p; + + return SQLITE_OK; +} + +/* +** Destroy a tokenizer +*/ +static int icuDestroy(sqlite3_tokenizer *pTokenizer){ + IcuTokenizer *p = (IcuTokenizer *)pTokenizer; + sqlite3_free(p); + return SQLITE_OK; +} + +/* +** Prepare to begin tokenizing a particular string. The input +** string to be tokenized is pInput[0..nBytes-1]. A cursor +** used to incrementally tokenize this string is returned in +** *ppCursor. +*/ +static int icuOpen( + sqlite3_tokenizer *pTokenizer, /* The tokenizer */ + const char *zInput, /* Input string */ + int nInput, /* Length of zInput in bytes */ + sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ +){ + IcuTokenizer *p = (IcuTokenizer *)pTokenizer; + IcuCursor *pCsr; + + const int32_t opt = U_FOLD_CASE_DEFAULT; + UErrorCode status = U_ZERO_ERROR; + int nChar; + + UChar32 c; + int iInput = 0; + int iOut = 0; + + *ppCursor = 0; + + nChar = nInput+1; + pCsr = (IcuCursor *)sqlite3_malloc( + sizeof(IcuCursor) + /* IcuCursor */ + nChar * sizeof(UChar) + /* IcuCursor.aChar[] */ + (nChar+1) * sizeof(int) /* IcuCursor.aOffset[] */ + ); + if( !pCsr ){ + return SQLITE_NOMEM; + } + memset(pCsr, 0, sizeof(IcuCursor)); + pCsr->aChar = (UChar *)&pCsr[1]; + pCsr->aOffset = (int *)&pCsr->aChar[nChar]; + + pCsr->aOffset[iOut] = iInput; + U8_NEXT(zInput, iInput, nInput, c); + while( c>0 ){ + int isError = 0; + c = u_foldCase(c, opt); + U16_APPEND(pCsr->aChar, iOut, nChar, c, isError); + if( isError ){ + sqlite3_free(pCsr); + return SQLITE_ERROR; + } + pCsr->aOffset[iOut] = iInput; + + if( iInputpIter = ubrk_open(UBRK_WORD, p->zLocale, pCsr->aChar, iOut, &status); + if( !U_SUCCESS(status) ){ + sqlite3_free(pCsr); + return SQLITE_ERROR; + } + pCsr->nChar = iOut; + + ubrk_first(pCsr->pIter); + *ppCursor = (sqlite3_tokenizer_cursor *)pCsr; + return SQLITE_OK; +} + +/* +** Close a tokenization cursor previously opened by a call to icuOpen(). +*/ +static int icuClose(sqlite3_tokenizer_cursor *pCursor){ + IcuCursor *pCsr = (IcuCursor *)pCursor; + ubrk_close(pCsr->pIter); + sqlite3_free(pCsr->zBuffer); + sqlite3_free(pCsr); + return SQLITE_OK; +} + +/* +** Extract the next token from a tokenization cursor. 
+*/ +static int icuNext( + sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by simpleOpen */ + const char **ppToken, /* OUT: *ppToken is the token text */ + int *pnBytes, /* OUT: Number of bytes in token */ + int *piStartOffset, /* OUT: Starting offset of token */ + int *piEndOffset, /* OUT: Ending offset of token */ + int *piPosition /* OUT: Position integer of token */ +){ + IcuCursor *pCsr = (IcuCursor *)pCursor; + + int iStart = 0; + int iEnd = 0; + int nByte = 0; + + while( iStart==iEnd ){ + UChar32 c; + + iStart = ubrk_current(pCsr->pIter); + iEnd = ubrk_next(pCsr->pIter); + if( iEnd==UBRK_DONE ){ + return SQLITE_DONE; + } + + while( iStartaChar, iWhite, pCsr->nChar, c); + if( u_isspace(c) ){ + iStart = iWhite; + }else{ + break; + } + } + assert(iStart<=iEnd); + } + + do { + UErrorCode status = U_ZERO_ERROR; + if( nByte ){ + char *zNew = sqlite3_realloc(pCsr->zBuffer, nByte); + if( !zNew ){ + return SQLITE_NOMEM; + } + pCsr->zBuffer = zNew; + pCsr->nBuffer = nByte; + } + + u_strToUTF8( + pCsr->zBuffer, pCsr->nBuffer, &nByte, /* Output vars */ + &pCsr->aChar[iStart], iEnd-iStart, /* Input vars */ + &status /* Output success/failure */ + ); + } while( nByte>pCsr->nBuffer ); + + *ppToken = pCsr->zBuffer; + *pnBytes = nByte; + *piStartOffset = pCsr->aOffset[iStart]; + *piEndOffset = pCsr->aOffset[iEnd]; + *piPosition = pCsr->iToken++; + + return SQLITE_OK; +} + +/* +** The set of routines that implement the simple tokenizer +*/ +static const sqlite3_tokenizer_module icuTokenizerModule = { + 0, /* iVersion */ + icuCreate, /* xCreate */ + icuDestroy, /* xCreate */ + icuOpen, /* xOpen */ + icuClose, /* xClose */ + icuNext, /* xNext */ +}; + +/* +** Set *ppModule to point at the implementation of the ICU tokenizer. +*/ +void sqlite3Fts2IcuTokenizerModule( + sqlite3_tokenizer_module const**ppModule +){ + *ppModule = &icuTokenizerModule; +} + +#endif /* defined(SQLITE_ENABLE_ICU) */ +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2_porter.c b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2_porter.c new file mode 100644 index 0000000000..dab1849531 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2_porter.c @@ -0,0 +1,642 @@ +/* +** 2006 September 30 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** Implementation of the full-text-search tokenizer that implements +** a Porter stemmer. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS2 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS2 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS2 is defined). 
+*/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) + + +#include +#include +#include +#include +#include + +#include "fts2_tokenizer.h" + +/* +** Class derived from sqlite3_tokenizer +*/ +typedef struct porter_tokenizer { + sqlite3_tokenizer base; /* Base class */ +} porter_tokenizer; + +/* +** Class derived from sqlit3_tokenizer_cursor +*/ +typedef struct porter_tokenizer_cursor { + sqlite3_tokenizer_cursor base; + const char *zInput; /* input we are tokenizing */ + int nInput; /* size of the input */ + int iOffset; /* current position in zInput */ + int iToken; /* index of next token to be returned */ + char *zToken; /* storage for current token */ + int nAllocated; /* space allocated to zToken buffer */ +} porter_tokenizer_cursor; + + +/* Forward declaration */ +static const sqlite3_tokenizer_module porterTokenizerModule; + + +/* +** Create a new tokenizer instance. +*/ +static int porterCreate( + int argc, const char * const *argv, + sqlite3_tokenizer **ppTokenizer +){ + porter_tokenizer *t; + t = (porter_tokenizer *) calloc(sizeof(*t), 1); + if( t==NULL ) return SQLITE_NOMEM; + + *ppTokenizer = &t->base; + return SQLITE_OK; +} + +/* +** Destroy a tokenizer +*/ +static int porterDestroy(sqlite3_tokenizer *pTokenizer){ + free(pTokenizer); + return SQLITE_OK; +} + +/* +** Prepare to begin tokenizing a particular string. The input +** string to be tokenized is zInput[0..nInput-1]. A cursor +** used to incrementally tokenize this string is returned in +** *ppCursor. +*/ +static int porterOpen( + sqlite3_tokenizer *pTokenizer, /* The tokenizer */ + const char *zInput, int nInput, /* String to be tokenized */ + sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ +){ + porter_tokenizer_cursor *c; + + c = (porter_tokenizer_cursor *) malloc(sizeof(*c)); + if( c==NULL ) return SQLITE_NOMEM; + + c->zInput = zInput; + if( zInput==0 ){ + c->nInput = 0; + }else if( nInput<0 ){ + c->nInput = (int)strlen(zInput); + }else{ + c->nInput = nInput; + } + c->iOffset = 0; /* start tokenizing at the beginning */ + c->iToken = 0; + c->zToken = NULL; /* no space allocated, yet. */ + c->nAllocated = 0; + + *ppCursor = &c->base; + return SQLITE_OK; +} + +/* +** Close a tokenization cursor previously opened by a call to +** porterOpen() above. +*/ +static int porterClose(sqlite3_tokenizer_cursor *pCursor){ + porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor; + free(c->zToken); + free(c); + return SQLITE_OK; +} +/* +** Vowel or consonant +*/ +static const char cType[] = { + 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, + 1, 1, 1, 2, 1 +}; + +/* +** isConsonant() and isVowel() determine if their first character in +** the string they point to is a consonant or a vowel, according +** to Porter ruls. +** +** A consonate is any letter other than 'a', 'e', 'i', 'o', or 'u'. +** 'Y' is a consonant unless it follows another consonant, +** in which case it is a vowel. +** +** In these routine, the letters are in reverse order. So the 'y' rule +** is that 'y' is a consonant unless it is followed by another +** consonent. 
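+**
+** (Worked example, added for clarity: the word "dry" is processed in
+** reverse as "yrd". isConsonant("yrd") asks whether the character that
+** follows the 'y' in the reversed string, i.e. the character before it
+** in the original word, is a vowel. Here that character is the
+** consonant 'r', so the 'y' of "dry" counts as a vowel. In "toy",
+** reversed "yot", the 'y' follows the vowel 'o' and therefore counts
+** as a consonant.)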
+*/ +static int isVowel(const char*); +static int isConsonant(const char *z){ + int j; + char x = *z; + if( x==0 ) return 0; + assert( x>='a' && x<='z' ); + j = cType[x-'a']; + if( j<2 ) return j; + return z[1]==0 || isVowel(z + 1); +} +static int isVowel(const char *z){ + int j; + char x = *z; + if( x==0 ) return 0; + assert( x>='a' && x<='z' ); + j = cType[x-'a']; + if( j<2 ) return 1-j; + return isConsonant(z + 1); +} + +/* +** Let any sequence of one or more vowels be represented by V and let +** C be sequence of one or more consonants. Then every word can be +** represented as: +** +** [C] (VC){m} [V] +** +** In prose: A word is an optional consonant followed by zero or +** vowel-consonant pairs followed by an optional vowel. "m" is the +** number of vowel consonant pairs. This routine computes the value +** of m for the first i bytes of a word. +** +** Return true if the m-value for z is 1 or more. In other words, +** return true if z contains at least one vowel that is followed +** by a consonant. +** +** In this routine z[] is in reverse order. So we are really looking +** for an instance of of a consonant followed by a vowel. +*/ +static int m_gt_0(const char *z){ + while( isVowel(z) ){ z++; } + if( *z==0 ) return 0; + while( isConsonant(z) ){ z++; } + return *z!=0; +} + +/* Like mgt0 above except we are looking for a value of m which is +** exactly 1 +*/ +static int m_eq_1(const char *z){ + while( isVowel(z) ){ z++; } + if( *z==0 ) return 0; + while( isConsonant(z) ){ z++; } + if( *z==0 ) return 0; + while( isVowel(z) ){ z++; } + if( *z==0 ) return 1; + while( isConsonant(z) ){ z++; } + return *z==0; +} + +/* Like mgt0 above except we are looking for a value of m>1 instead +** or m>0 +*/ +static int m_gt_1(const char *z){ + while( isVowel(z) ){ z++; } + if( *z==0 ) return 0; + while( isConsonant(z) ){ z++; } + if( *z==0 ) return 0; + while( isVowel(z) ){ z++; } + if( *z==0 ) return 0; + while( isConsonant(z) ){ z++; } + return *z!=0; +} + +/* +** Return TRUE if there is a vowel anywhere within z[0..n-1] +*/ +static int hasVowel(const char *z){ + while( isConsonant(z) ){ z++; } + return *z!=0; +} + +/* +** Return TRUE if the word ends in a double consonant. +** +** The text is reversed here. So we are really looking at +** the first two characters of z[]. +*/ +static int doubleConsonant(const char *z){ + return isConsonant(z) && z[0]==z[1] && isConsonant(z+1); +} + +/* +** Return TRUE if the word ends with three letters which +** are consonant-vowel-consonent and where the final consonant +** is not 'w', 'x', or 'y'. +** +** The word is reversed here. So we are really checking the +** first three letters and the first one cannot be in [wxy]. +*/ +static int star_oh(const char *z){ + return + z[0]!=0 && isConsonant(z) && + z[0]!='w' && z[0]!='x' && z[0]!='y' && + z[1]!=0 && isVowel(z+1) && + z[2]!=0 && isConsonant(z+2); +} + +/* +** If the word ends with zFrom and xCond() is true for the stem +** of the word that preceeds the zFrom ending, then change the +** ending to zTo. +** +** The input word *pz and zFrom are both in reverse order. zTo +** is in normal order. +** +** Return TRUE if zFrom matches. Return FALSE if zFrom does not +** match. Not that TRUE is returned even if xCond() fails and +** no substitution occurs. +*/ +static int stem( + char **pz, /* The word being stemmed (Reversed) */ + const char *zFrom, /* If the ending matches this... (Reversed) */ + const char *zTo, /* ... 
change the ending to this (not reversed) */ + int (*xCond)(const char*) /* Condition that must be true */ +){ + char *z = *pz; + while( *zFrom && *zFrom==*z ){ z++; zFrom++; } + if( *zFrom!=0 ) return 0; + if( xCond && !xCond(z) ) return 1; + while( *zTo ){ + *(--z) = *(zTo++); + } + *pz = z; + return 1; +} + +/* +** This is the fallback stemmer used when the porter stemmer is +** inappropriate. The input word is copied into the output with +** US-ASCII case folding. If the input word is too long (more +** than 20 bytes if it contains no digits or more than 6 bytes if +** it contains digits) then word is truncated to 20 or 6 bytes +** by taking 10 or 3 bytes from the beginning and end. +*/ +static void copy_stemmer(const char *zIn, int nIn, char *zOut, int *pnOut){ + int i, mx, j; + int hasDigit = 0; + for(i=0; i='A' && c<='Z' ){ + zOut[i] = c - 'A' + 'a'; + }else{ + if( c>='0' && c<='9' ) hasDigit = 1; + zOut[i] = c; + } + } + mx = hasDigit ? 3 : 10; + if( nIn>mx*2 ){ + for(j=mx, i=nIn-mx; i=sizeof(zReverse)-7 ){ + /* The word is too big or too small for the porter stemmer. + ** Fallback to the copy stemmer */ + copy_stemmer(zIn, nIn, zOut, pnOut); + return; + } + for(i=0, j=sizeof(zReverse)-6; i='A' && c<='Z' ){ + zReverse[j] = c + 'a' - 'A'; + }else if( c>='a' && c<='z' ){ + zReverse[j] = c; + }else{ + /* The use of a character not in [a-zA-Z] means that we fallback + ** to the copy stemmer */ + copy_stemmer(zIn, nIn, zOut, pnOut); + return; + } + } + memset(&zReverse[sizeof(zReverse)-5], 0, 5); + z = &zReverse[j+1]; + + + /* Step 1a */ + if( z[0]=='s' ){ + if( + !stem(&z, "sess", "ss", 0) && + !stem(&z, "sei", "i", 0) && + !stem(&z, "ss", "ss", 0) + ){ + z++; + } + } + + /* Step 1b */ + z2 = z; + if( stem(&z, "dee", "ee", m_gt_0) ){ + /* Do nothing. The work was all in the test */ + }else if( + (stem(&z, "gni", "", hasVowel) || stem(&z, "de", "", hasVowel)) + && z!=z2 + ){ + if( stem(&z, "ta", "ate", 0) || + stem(&z, "lb", "ble", 0) || + stem(&z, "zi", "ize", 0) ){ + /* Do nothing. 
The work was all in the test */ + }else if( doubleConsonant(z) && (*z!='l' && *z!='s' && *z!='z') ){ + z++; + }else if( m_eq_1(z) && star_oh(z) ){ + *(--z) = 'e'; + } + } + + /* Step 1c */ + if( z[0]=='y' && hasVowel(z+1) ){ + z[0] = 'i'; + } + + /* Step 2 */ + switch( z[1] ){ + case 'a': + stem(&z, "lanoita", "ate", m_gt_0) || + stem(&z, "lanoit", "tion", m_gt_0); + break; + case 'c': + stem(&z, "icne", "ence", m_gt_0) || + stem(&z, "icna", "ance", m_gt_0); + break; + case 'e': + stem(&z, "rezi", "ize", m_gt_0); + break; + case 'g': + stem(&z, "igol", "log", m_gt_0); + break; + case 'l': + stem(&z, "ilb", "ble", m_gt_0) || + stem(&z, "illa", "al", m_gt_0) || + stem(&z, "iltne", "ent", m_gt_0) || + stem(&z, "ile", "e", m_gt_0) || + stem(&z, "ilsuo", "ous", m_gt_0); + break; + case 'o': + stem(&z, "noitazi", "ize", m_gt_0) || + stem(&z, "noita", "ate", m_gt_0) || + stem(&z, "rota", "ate", m_gt_0); + break; + case 's': + stem(&z, "msila", "al", m_gt_0) || + stem(&z, "ssenevi", "ive", m_gt_0) || + stem(&z, "ssenluf", "ful", m_gt_0) || + stem(&z, "ssensuo", "ous", m_gt_0); + break; + case 't': + stem(&z, "itila", "al", m_gt_0) || + stem(&z, "itivi", "ive", m_gt_0) || + stem(&z, "itilib", "ble", m_gt_0); + break; + } + + /* Step 3 */ + switch( z[0] ){ + case 'e': + stem(&z, "etaci", "ic", m_gt_0) || + stem(&z, "evita", "", m_gt_0) || + stem(&z, "ezila", "al", m_gt_0); + break; + case 'i': + stem(&z, "itici", "ic", m_gt_0); + break; + case 'l': + stem(&z, "laci", "ic", m_gt_0) || + stem(&z, "luf", "", m_gt_0); + break; + case 's': + stem(&z, "ssen", "", m_gt_0); + break; + } + + /* Step 4 */ + switch( z[1] ){ + case 'a': + if( z[0]=='l' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'c': + if( z[0]=='e' && z[2]=='n' && (z[3]=='a' || z[3]=='e') && m_gt_1(z+4) ){ + z += 4; + } + break; + case 'e': + if( z[0]=='r' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'i': + if( z[0]=='c' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'l': + if( z[0]=='e' && z[2]=='b' && (z[3]=='a' || z[3]=='i') && m_gt_1(z+4) ){ + z += 4; + } + break; + case 'n': + if( z[0]=='t' ){ + if( z[2]=='a' ){ + if( m_gt_1(z+3) ){ + z += 3; + } + }else if( z[2]=='e' ){ + stem(&z, "tneme", "", m_gt_1) || + stem(&z, "tnem", "", m_gt_1) || + stem(&z, "tne", "", m_gt_1); + } + } + break; + case 'o': + if( z[0]=='u' ){ + if( m_gt_1(z+2) ){ + z += 2; + } + }else if( z[3]=='s' || z[3]=='t' ){ + stem(&z, "noi", "", m_gt_1); + } + break; + case 's': + if( z[0]=='m' && z[2]=='i' && m_gt_1(z+3) ){ + z += 3; + } + break; + case 't': + stem(&z, "eta", "", m_gt_1) || + stem(&z, "iti", "", m_gt_1); + break; + case 'u': + if( z[0]=='s' && z[2]=='o' && m_gt_1(z+3) ){ + z += 3; + } + break; + case 'v': + case 'z': + if( z[0]=='e' && z[2]=='i' && m_gt_1(z+3) ){ + z += 3; + } + break; + } + + /* Step 5a */ + if( z[0]=='e' ){ + if( m_gt_1(z+1) ){ + z++; + }else if( m_eq_1(z+1) && !star_oh(z+1) ){ + z++; + } + } + + /* Step 5b */ + if( m_gt_1(z) && z[0]=='l' && z[1]=='l' ){ + z++; + } + + /* z[] is now the stemmed word in reverse order. Flip it back + ** around into forward order and return. + */ + *pnOut = i = strlen(z); + zOut[i] = 0; + while( *z ){ + zOut[--i] = *(z++); + } +} + +/* +** Characters that can be part of a token. We assume any character +** whose value is greater than 0x80 (any UTF character) can be +** part of a token. In other words, delimiters all must have +** values of 0x7f or lower. 
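+**
+** (Note added for clarity: for the ASCII range, the porterIdChar[]
+** table below marks exactly the characters [0-9], [A-Z], [a-z] and '_'
+** as token characters; every other ASCII character is a delimiter,
+** while bytes with the high bit set are always kept inside tokens.
+** For reference, the stemmer above maps, for example, "connection"
+** to "connect" and "relational" to "relate".)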
+*/ +static const char porterIdChar[] = { +/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ +}; +#define isDelim(C) (((ch=C)&0x80)==0 && (ch<0x30 || !porterIdChar[ch-0x30])) + +/* +** Extract the next token from a tokenization cursor. The cursor must +** have been opened by a prior call to porterOpen(). +*/ +static int porterNext( + sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by porterOpen */ + const char **pzToken, /* OUT: *pzToken is the token text */ + int *pnBytes, /* OUT: Number of bytes in token */ + int *piStartOffset, /* OUT: Starting offset of token */ + int *piEndOffset, /* OUT: Ending offset of token */ + int *piPosition /* OUT: Position integer of token */ +){ + porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor; + const char *z = c->zInput; + + while( c->iOffsetnInput ){ + int iStartOffset, ch; + + /* Scan past delimiter characters */ + while( c->iOffsetnInput && isDelim(z[c->iOffset]) ){ + c->iOffset++; + } + + /* Count non-delimiter characters. */ + iStartOffset = c->iOffset; + while( c->iOffsetnInput && !isDelim(z[c->iOffset]) ){ + c->iOffset++; + } + + if( c->iOffset>iStartOffset ){ + int n = c->iOffset-iStartOffset; + if( n>c->nAllocated ){ + c->nAllocated = n+20; + c->zToken = realloc(c->zToken, c->nAllocated); + if( c->zToken==NULL ) return SQLITE_NOMEM; + } + porter_stemmer(&z[iStartOffset], n, c->zToken, pnBytes); + *pzToken = c->zToken; + *piStartOffset = iStartOffset; + *piEndOffset = c->iOffset; + *piPosition = c->iToken++; + return SQLITE_OK; + } + } + return SQLITE_DONE; +} + +/* +** The set of routines that implement the porter-stemmer tokenizer +*/ +static const sqlite3_tokenizer_module porterTokenizerModule = { + 0, + porterCreate, + porterDestroy, + porterOpen, + porterClose, + porterNext, +}; + +/* +** Allocate a new porter tokenizer. Return a pointer to the new +** tokenizer in *ppModule +*/ +void sqlite3Fts2PorterTokenizerModule( + sqlite3_tokenizer_module const**ppModule +){ + *ppModule = &porterTokenizerModule; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2_tokenizer.c b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2_tokenizer.c new file mode 100644 index 0000000000..cbf771b47f --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2_tokenizer.c @@ -0,0 +1,371 @@ +/* +** 2007 June 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This is part of an SQLite module implementing full-text search. +** This particular file implements the generic tokenizer interface. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS2 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS2 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS2 is defined). 
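+**
+** (Side note, added for clarity: a custom tokenizer is exposed to this
+** file as an sqlite3_tokenizer_module structure, in the same shape used
+** by the "simple" and "porter" modules elsewhere in this directory. A
+** minimal sketch, where the my* names are hypothetical callbacks
+** supplied by the implementor:
+**
+**     static const sqlite3_tokenizer_module myTokenizerModule = {
+**       0,            // iVersion (always 0 for this interface)
+**       myCreate,     // xCreate:  allocate an sqlite3_tokenizer
+**       myDestroy,    // xDestroy: free it
+**       myOpen,       // xOpen:    start tokenizing one input buffer
+**       myClose,      // xClose:   release the cursor
+**       myNext        // xNext:    return next token, or SQLITE_DONE
+**     };
+**
+** A pointer to such a structure is what gets registered through the
+** fts2_tokenizer() scalar function implemented below.)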
+*/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) + + +#include "sqlite3.h" +#include "sqlite3ext.h" +SQLITE_EXTENSION_INIT1 + +#include "fts2_hash.h" +#include "fts2_tokenizer.h" +#include + +/* +** Implementation of the SQL scalar function for accessing the underlying +** hash table. This function may be called as follows: +** +** SELECT (); +** SELECT (, ); +** +** where is the name passed as the second argument +** to the sqlite3Fts2InitHashTable() function (e.g. 'fts2_tokenizer'). +** +** If the argument is specified, it must be a blob value +** containing a pointer to be stored as the hash data corresponding +** to the string . If is not specified, then +** the string must already exist in the has table. Otherwise, +** an error is returned. +** +** Whether or not the argument is specified, the value returned +** is a blob containing the pointer stored as the hash data corresponding +** to string (after the hash-table is updated, if applicable). +*/ +static void scalarFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + fts2Hash *pHash; + void *pPtr = 0; + const unsigned char *zName; + int nName; + + assert( argc==1 || argc==2 ); + + pHash = (fts2Hash *)sqlite3_user_data(context); + + zName = sqlite3_value_text(argv[0]); + nName = sqlite3_value_bytes(argv[0])+1; + + if( argc==2 ){ + void *pOld; + int n = sqlite3_value_bytes(argv[1]); + if( n!=sizeof(pPtr) ){ + sqlite3_result_error(context, "argument type mismatch", -1); + return; + } + pPtr = *(void **)sqlite3_value_blob(argv[1]); + pOld = sqlite3Fts2HashInsert(pHash, (void *)zName, nName, pPtr); + if( pOld==pPtr ){ + sqlite3_result_error(context, "out of memory", -1); + return; + } + }else{ + pPtr = sqlite3Fts2HashFind(pHash, zName, nName); + if( !pPtr ){ + char *zErr = sqlite3_mprintf("unknown tokenizer: %s", zName); + sqlite3_result_error(context, zErr, -1); + sqlite3_free(zErr); + return; + } + } + + sqlite3_result_blob(context, (void *)&pPtr, sizeof(pPtr), SQLITE_TRANSIENT); +} + +#ifdef SQLITE_TEST + +#include +#include + +/* +** Implementation of a special SQL scalar function for testing tokenizers +** designed to be used in concert with the Tcl testing framework. This +** function must be called with two arguments: +** +** SELECT (, ); +** SELECT (, ); +** +** where is the name passed as the second argument +** to the sqlite3Fts2InitHashTable() function (e.g. 'fts2_tokenizer') +** concatenated with the string '_test' (e.g. 'fts2_tokenizer_test'). +** +** The return value is a string that may be interpreted as a Tcl +** list. For each token in the , three elements are +** added to the returned list. The first is the token position, the +** second is the token text (folded, stemmed, etc.) and the third is the +** substring of associated with the token. 
For example, +** using the built-in "simple" tokenizer: +** +** SELECT fts_tokenizer_test('simple', 'I don't see how'); +** +** will return the string: +** +** "{0 i I 1 dont don't 2 see see 3 how how}" +** +*/ +static void testFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + fts2Hash *pHash; + sqlite3_tokenizer_module *p; + sqlite3_tokenizer *pTokenizer = 0; + sqlite3_tokenizer_cursor *pCsr = 0; + + const char *zErr = 0; + + const char *zName; + int nName; + const char *zInput; + int nInput; + + const char *zArg = 0; + + const char *zToken; + int nToken; + int iStart; + int iEnd; + int iPos; + + Tcl_Obj *pRet; + + assert( argc==2 || argc==3 ); + + nName = sqlite3_value_bytes(argv[0]); + zName = (const char *)sqlite3_value_text(argv[0]); + nInput = sqlite3_value_bytes(argv[argc-1]); + zInput = (const char *)sqlite3_value_text(argv[argc-1]); + + if( argc==3 ){ + zArg = (const char *)sqlite3_value_text(argv[1]); + } + + pHash = (fts2Hash *)sqlite3_user_data(context); + p = (sqlite3_tokenizer_module *)sqlite3Fts2HashFind(pHash, zName, nName+1); + + if( !p ){ + char *zErr = sqlite3_mprintf("unknown tokenizer: %s", zName); + sqlite3_result_error(context, zErr, -1); + sqlite3_free(zErr); + return; + } + + pRet = Tcl_NewObj(); + Tcl_IncrRefCount(pRet); + + if( SQLITE_OK!=p->xCreate(zArg ? 1 : 0, &zArg, &pTokenizer) ){ + zErr = "error in xCreate()"; + goto finish; + } + pTokenizer->pModule = p; + if( SQLITE_OK!=p->xOpen(pTokenizer, zInput, nInput, &pCsr) ){ + zErr = "error in xOpen()"; + goto finish; + } + pCsr->pTokenizer = pTokenizer; + + while( SQLITE_OK==p->xNext(pCsr, &zToken, &nToken, &iStart, &iEnd, &iPos) ){ + Tcl_ListObjAppendElement(0, pRet, Tcl_NewIntObj(iPos)); + Tcl_ListObjAppendElement(0, pRet, Tcl_NewStringObj(zToken, nToken)); + zToken = &zInput[iStart]; + nToken = iEnd-iStart; + Tcl_ListObjAppendElement(0, pRet, Tcl_NewStringObj(zToken, nToken)); + } + + if( SQLITE_OK!=p->xClose(pCsr) ){ + zErr = "error in xClose()"; + goto finish; + } + if( SQLITE_OK!=p->xDestroy(pTokenizer) ){ + zErr = "error in xDestroy()"; + goto finish; + } + +finish: + if( zErr ){ + sqlite3_result_error(context, zErr, -1); + }else{ + sqlite3_result_text(context, Tcl_GetString(pRet), -1, SQLITE_TRANSIENT); + } + Tcl_DecrRefCount(pRet); +} + +static +int registerTokenizer( + sqlite3 *db, + char *zName, + const sqlite3_tokenizer_module *p +){ + int rc; + sqlite3_stmt *pStmt; + const char zSql[] = "SELECT fts2_tokenizer(?, ?)"; + + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + + sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); + sqlite3_bind_blob(pStmt, 2, &p, sizeof(p), SQLITE_STATIC); + sqlite3_step(pStmt); + + return sqlite3_finalize(pStmt); +} + +static +int queryTokenizer( + sqlite3 *db, + char *zName, + const sqlite3_tokenizer_module **pp +){ + int rc; + sqlite3_stmt *pStmt; + const char zSql[] = "SELECT fts2_tokenizer(?)"; + + *pp = 0; + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + + sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); + if( SQLITE_ROW==sqlite3_step(pStmt) ){ + if( sqlite3_column_type(pStmt, 0)==SQLITE_BLOB ){ + memcpy(pp, sqlite3_column_blob(pStmt, 0), sizeof(*pp)); + } + } + + return sqlite3_finalize(pStmt); +} + +void sqlite3Fts2SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule); + +/* +** Implementation of the scalar function fts2_tokenizer_internal_test(). 
+** This function is used for testing only, it is not included in the +** build unless SQLITE_TEST is defined. +** +** The purpose of this is to test that the fts2_tokenizer() function +** can be used as designed by the C-code in the queryTokenizer and +** registerTokenizer() functions above. These two functions are repeated +** in the README.tokenizer file as an example, so it is important to +** test them. +** +** To run the tests, evaluate the fts2_tokenizer_internal_test() scalar +** function with no arguments. An assert() will fail if a problem is +** detected. i.e.: +** +** SELECT fts2_tokenizer_internal_test(); +** +*/ +static void intTestFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + int rc; + const sqlite3_tokenizer_module *p1; + const sqlite3_tokenizer_module *p2; + sqlite3 *db = (sqlite3 *)sqlite3_user_data(context); + + /* Test the query function */ + sqlite3Fts2SimpleTokenizerModule(&p1); + rc = queryTokenizer(db, "simple", &p2); + assert( rc==SQLITE_OK ); + assert( p1==p2 ); + rc = queryTokenizer(db, "nosuchtokenizer", &p2); + assert( rc==SQLITE_ERROR ); + assert( p2==0 ); + assert( 0==strcmp(sqlite3_errmsg(db), "unknown tokenizer: nosuchtokenizer") ); + + /* Test the storage function */ + rc = registerTokenizer(db, "nosuchtokenizer", p1); + assert( rc==SQLITE_OK ); + rc = queryTokenizer(db, "nosuchtokenizer", &p2); + assert( rc==SQLITE_OK ); + assert( p2==p1 ); + + sqlite3_result_text(context, "ok", -1, SQLITE_STATIC); +} + +#endif + +/* +** Set up SQL objects in database db used to access the contents of +** the hash table pointed to by argument pHash. The hash table must +** been initialised to use string keys, and to take a private copy +** of the key when a value is inserted. i.e. by a call similar to: +** +** sqlite3Fts2HashInit(pHash, FTS2_HASH_STRING, 1); +** +** This function adds a scalar function (see header comment above +** scalarFunc() in this file for details) and, if ENABLE_TABLE is +** defined at compilation time, a temporary virtual table (see header +** comment above struct HashTableVtab) to the database schema. Both +** provide read/write access to the contents of *pHash. +** +** The third argument to this function, zName, is used as the name +** of both the scalar and, if created, the virtual table. 
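+**
+** (A brief usage sketch, assuming an open database handle db and a
+** caller that owns the fts2Hash object; variable names are illustrative
+** only:
+**
+**     fts2Hash hash;
+**     sqlite3Fts2HashInit(&hash, FTS2_HASH_STRING, 1);
+**     int rc = sqlite3Fts2InitHashTable(db, &hash, "fts2_tokenizer");
+**
+** After this call the fts2_tokenizer() scalar function described above
+** is available on connection db.)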
+*/ +int sqlite3Fts2InitHashTable( + sqlite3 *db, + fts2Hash *pHash, + const char *zName +){ + int rc = SQLITE_OK; + void *p = (void *)pHash; + const int any = SQLITE_ANY; + char *zTest = 0; + char *zTest2 = 0; + +#ifdef SQLITE_TEST + void *pdb = (void *)db; + zTest = sqlite3_mprintf("%s_test", zName); + zTest2 = sqlite3_mprintf("%s_internal_test", zName); + if( !zTest || !zTest2 ){ + rc = SQLITE_NOMEM; + } +#endif + + if( rc!=SQLITE_OK + || (rc = sqlite3_create_function(db, zName, 1, any, p, scalarFunc, 0, 0)) + || (rc = sqlite3_create_function(db, zName, 2, any, p, scalarFunc, 0, 0)) +#ifdef SQLITE_TEST + || (rc = sqlite3_create_function(db, zTest, 2, any, p, testFunc, 0, 0)) + || (rc = sqlite3_create_function(db, zTest, 3, any, p, testFunc, 0, 0)) + || (rc = sqlite3_create_function(db, zTest2, 0, any, pdb, intTestFunc, 0, 0)) +#endif + ); + + sqlite3_free(zTest); + sqlite3_free(zTest2); + return rc; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2_tokenizer.h b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2_tokenizer.h new file mode 100644 index 0000000000..8c256b2bed --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2_tokenizer.h @@ -0,0 +1,145 @@ +/* +** 2006 July 10 +** +** The author disclaims copyright to this source code. +** +************************************************************************* +** Defines the interface to tokenizers used by fulltext-search. There +** are three basic components: +** +** sqlite3_tokenizer_module is a singleton defining the tokenizer +** interface functions. This is essentially the class structure for +** tokenizers. +** +** sqlite3_tokenizer is used to define a particular tokenizer, perhaps +** including customization information defined at creation time. +** +** sqlite3_tokenizer_cursor is generated by a tokenizer to generate +** tokens from a particular input. +*/ +#ifndef _FTS2_TOKENIZER_H_ +#define _FTS2_TOKENIZER_H_ + +/* TODO(shess) Only used for SQLITE_OK and SQLITE_DONE at this time. +** If tokenizers are to be allowed to call sqlite3_*() functions, then +** we will need a way to register the API consistently. +*/ +#include "sqlite3.h" + +/* +** Structures used by the tokenizer interface. When a new tokenizer +** implementation is registered, the caller provides a pointer to +** an sqlite3_tokenizer_module containing pointers to the callback +** functions that make up an implementation. +** +** When an fts2 table is created, it passes any arguments passed to +** the tokenizer clause of the CREATE VIRTUAL TABLE statement to the +** sqlite3_tokenizer_module.xCreate() function of the requested tokenizer +** implementation. The xCreate() function in turn returns an +** sqlite3_tokenizer structure representing the specific tokenizer to +** be used for the fts2 table (customized by the tokenizer clause arguments). +** +** To tokenize an input buffer, the sqlite3_tokenizer_module.xOpen() +** method is called. It returns an sqlite3_tokenizer_cursor object +** that may be used to tokenize a specific input buffer based on +** the tokenization rules supplied by a specific sqlite3_tokenizer +** object. +*/ +typedef struct sqlite3_tokenizer_module sqlite3_tokenizer_module; +typedef struct sqlite3_tokenizer sqlite3_tokenizer; +typedef struct sqlite3_tokenizer_cursor sqlite3_tokenizer_cursor; + +struct sqlite3_tokenizer_module { + + /* + ** Structure version. Should always be set to 0. + */ + int iVersion; + + /* + ** Create a new tokenizer. 
The values in the argv[] array are the + ** arguments passed to the "tokenizer" clause of the CREATE VIRTUAL + ** TABLE statement that created the fts2 table. For example, if + ** the following SQL is executed: + ** + ** CREATE .. USING fts2( ... , tokenizer arg1 arg2) + ** + ** then argc is set to 2, and the argv[] array contains pointers + ** to the strings "arg1" and "arg2". + ** + ** This method should return either SQLITE_OK (0), or an SQLite error + ** code. If SQLITE_OK is returned, then *ppTokenizer should be set + ** to point at the newly created tokenizer structure. The generic + ** sqlite3_tokenizer.pModule variable should not be initialised by + ** this callback. The caller will do so. + */ + int (*xCreate)( + int argc, /* Size of argv array */ + const char *const*argv, /* Tokenizer argument strings */ + sqlite3_tokenizer **ppTokenizer /* OUT: Created tokenizer */ + ); + + /* + ** Destroy an existing tokenizer. The fts2 module calls this method + ** exactly once for each successful call to xCreate(). + */ + int (*xDestroy)(sqlite3_tokenizer *pTokenizer); + + /* + ** Create a tokenizer cursor to tokenize an input buffer. The caller + ** is responsible for ensuring that the input buffer remains valid + ** until the cursor is closed (using the xClose() method). + */ + int (*xOpen)( + sqlite3_tokenizer *pTokenizer, /* Tokenizer object */ + const char *pInput, int nBytes, /* Input buffer */ + sqlite3_tokenizer_cursor **ppCursor /* OUT: Created tokenizer cursor */ + ); + + /* + ** Destroy an existing tokenizer cursor. The fts2 module calls this + ** method exactly once for each successful call to xOpen(). + */ + int (*xClose)(sqlite3_tokenizer_cursor *pCursor); + + /* + ** Retrieve the next token from the tokenizer cursor pCursor. This + ** method should either return SQLITE_OK and set the values of the + ** "OUT" variables identified below, or SQLITE_DONE to indicate that + ** the end of the buffer has been reached, or an SQLite error code. + ** + ** *ppToken should be set to point at a buffer containing the + ** normalized version of the token (i.e. after any case-folding and/or + ** stemming has been performed). *pnBytes should be set to the length + ** of this buffer in bytes. The input text that generated the token is + ** identified by the byte offsets returned in *piStartOffset and + ** *piEndOffset. + ** + ** The buffer *ppToken is set to point at is managed by the tokenizer + ** implementation. It is only required to be valid until the next call + ** to xNext() or xClose(). + */ + /* TODO(shess) current implementation requires pInput to be + ** nul-terminated. This should either be fixed, or pInput/nBytes + ** should be converted to zInput. + */ + int (*xNext)( + sqlite3_tokenizer_cursor *pCursor, /* Tokenizer cursor */ + const char **ppToken, int *pnBytes, /* OUT: Normalized text for token */ + int *piStartOffset, /* OUT: Byte offset of token in input buffer */ + int *piEndOffset, /* OUT: Byte offset of end of token in input buffer */ + int *piPosition /* OUT: Number of tokens returned before this one */ + ); +}; + +struct sqlite3_tokenizer { + const sqlite3_tokenizer_module *pModule; /* The module for this tokenizer */ + /* Tokenizer implementations will typically add additional fields */ +}; + +struct sqlite3_tokenizer_cursor { + sqlite3_tokenizer *pTokenizer; /* Tokenizer for this cursor. 
*/ + /* Tokenizer implementations will typically add additional fields */ +}; + +#endif /* _FTS2_TOKENIZER_H_ */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2_tokenizer1.c b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2_tokenizer1.c new file mode 100644 index 0000000000..540ba27def --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/fts2_tokenizer1.c @@ -0,0 +1,229 @@ +/* +** 2006 Oct 10 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** Implementation of the "simple" full-text-search tokenizer. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS2 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS2 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS2 is defined). +*/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) + + +#include +#include +#include +#include +#include + +#include "fts2_tokenizer.h" + +typedef struct simple_tokenizer { + sqlite3_tokenizer base; + char delim[128]; /* flag ASCII delimiters */ +} simple_tokenizer; + +typedef struct simple_tokenizer_cursor { + sqlite3_tokenizer_cursor base; + const char *pInput; /* input we are tokenizing */ + int nBytes; /* size of the input */ + int iOffset; /* current position in pInput */ + int iToken; /* index of next token to be returned */ + char *pToken; /* storage for current token */ + int nTokenAllocated; /* space allocated to zToken buffer */ +} simple_tokenizer_cursor; + + +/* Forward declaration */ +static const sqlite3_tokenizer_module simpleTokenizerModule; + +static int simpleDelim(simple_tokenizer *t, unsigned char c){ + return c<0x80 && t->delim[c]; +} + +/* +** Create a new tokenizer instance. +*/ +static int simpleCreate( + int argc, const char * const *argv, + sqlite3_tokenizer **ppTokenizer +){ + simple_tokenizer *t; + + t = (simple_tokenizer *) calloc(sizeof(*t), 1); + if( t==NULL ) return SQLITE_NOMEM; + + /* TODO(shess) Delimiters need to remain the same from run to run, + ** else we need to reindex. One solution would be a meta-table to + ** track such information in the database, then we'd only want this + ** information on the initial create. + */ + if( argc>1 ){ + int i, n = strlen(argv[1]); + for(i=0; i=0x80 ){ + free(t); + return SQLITE_ERROR; + } + t->delim[ch] = 1; + } + } else { + /* Mark non-alphanumeric ASCII characters as delimiters */ + int i; + for(i=1; i<0x80; i++){ + t->delim[i] = !isalnum(i); + } + } + + *ppTokenizer = &t->base; + return SQLITE_OK; +} + +/* +** Destroy a tokenizer +*/ +static int simpleDestroy(sqlite3_tokenizer *pTokenizer){ + free(pTokenizer); + return SQLITE_OK; +} + +/* +** Prepare to begin tokenizing a particular string. The input +** string to be tokenized is pInput[0..nBytes-1]. A cursor +** used to incrementally tokenize this string is returned in +** *ppCursor. 
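+**
+** (Side note, added for clarity: the cursor produced here is driven
+** through the generic module interface, much as testFunc() in
+** fts2_tokenizer.c does. A condensed sketch with error checking
+** omitted, assuming a nul-terminated input string zText:
+**
+**     const sqlite3_tokenizer_module *pModule;
+**     sqlite3_tokenizer *pTok;
+**     sqlite3_tokenizer_cursor *pCur;
+**     const char *zToken; int nToken, iStart, iEnd, iPos;
+**
+**     sqlite3Fts2SimpleTokenizerModule(&pModule);
+**     pModule->xCreate(0, 0, &pTok);
+**     pTok->pModule = pModule;
+**     pModule->xOpen(pTok, zText, -1, &pCur);
+**     pCur->pTokenizer = pTok;
+**     while( pModule->xNext(pCur, &zToken, &nToken,
+**                           &iStart, &iEnd, &iPos)==SQLITE_OK ){
+**       // zToken[0..nToken-1] is the normalized token text
+**     }
+**     pModule->xClose(pCur);
+**     pModule->xDestroy(pTok);
+** )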
+*/ +static int simpleOpen( + sqlite3_tokenizer *pTokenizer, /* The tokenizer */ + const char *pInput, int nBytes, /* String to be tokenized */ + sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ +){ + simple_tokenizer_cursor *c; + + c = (simple_tokenizer_cursor *) malloc(sizeof(*c)); + if( c==NULL ) return SQLITE_NOMEM; + + c->pInput = pInput; + if( pInput==0 ){ + c->nBytes = 0; + }else if( nBytes<0 ){ + c->nBytes = (int)strlen(pInput); + }else{ + c->nBytes = nBytes; + } + c->iOffset = 0; /* start tokenizing at the beginning */ + c->iToken = 0; + c->pToken = NULL; /* no space allocated, yet. */ + c->nTokenAllocated = 0; + + *ppCursor = &c->base; + return SQLITE_OK; +} + +/* +** Close a tokenization cursor previously opened by a call to +** simpleOpen() above. +*/ +static int simpleClose(sqlite3_tokenizer_cursor *pCursor){ + simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; + free(c->pToken); + free(c); + return SQLITE_OK; +} + +/* +** Extract the next token from a tokenization cursor. The cursor must +** have been opened by a prior call to simpleOpen(). +*/ +static int simpleNext( + sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by simpleOpen */ + const char **ppToken, /* OUT: *ppToken is the token text */ + int *pnBytes, /* OUT: Number of bytes in token */ + int *piStartOffset, /* OUT: Starting offset of token */ + int *piEndOffset, /* OUT: Ending offset of token */ + int *piPosition /* OUT: Position integer of token */ +){ + simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; + simple_tokenizer *t = (simple_tokenizer *) pCursor->pTokenizer; + unsigned char *p = (unsigned char *)c->pInput; + + while( c->iOffsetnBytes ){ + int iStartOffset; + + /* Scan past delimiter characters */ + while( c->iOffsetnBytes && simpleDelim(t, p[c->iOffset]) ){ + c->iOffset++; + } + + /* Count non-delimiter characters. */ + iStartOffset = c->iOffset; + while( c->iOffsetnBytes && !simpleDelim(t, p[c->iOffset]) ){ + c->iOffset++; + } + + if( c->iOffset>iStartOffset ){ + int i, n = c->iOffset-iStartOffset; + if( n>c->nTokenAllocated ){ + c->nTokenAllocated = n+20; + c->pToken = realloc(c->pToken, c->nTokenAllocated); + if( c->pToken==NULL ) return SQLITE_NOMEM; + } + for(i=0; ipToken[i] = ch<0x80 ? tolower(ch) : ch; + } + *ppToken = c->pToken; + *pnBytes = n; + *piStartOffset = iStartOffset; + *piEndOffset = c->iOffset; + *piPosition = c->iToken++; + + return SQLITE_OK; + } + } + return SQLITE_DONE; +} + +/* +** The set of routines that implement the simple tokenizer +*/ +static const sqlite3_tokenizer_module simpleTokenizerModule = { + 0, + simpleCreate, + simpleDestroy, + simpleOpen, + simpleClose, + simpleNext, +}; + +/* +** Allocate a new simple tokenizer. Return a pointer to the new +** tokenizer in *ppModule +*/ +void sqlite3Fts2SimpleTokenizerModule( + sqlite3_tokenizer_module const**ppModule +){ + *ppModule = &simpleTokenizerModule; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/mkfts2amal.tcl b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/mkfts2amal.tcl new file mode 100644 index 0000000000..5c8d1e93d7 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts2/mkfts2amal.tcl @@ -0,0 +1,116 @@ +#!/usr/bin/tclsh +# +# This script builds a single C code file holding all of FTS2 code. +# The name of the output file is fts2amal.c. 
To build this file, +# first do: +# +# make target_source +# +# The make target above moves all of the source code files into +# a subdirectory named "tsrc". (This script expects to find the files +# there and will not work if they are not found.) +# +# After the "tsrc" directory has been created and populated, run +# this script: +# +# tclsh mkfts2amal.tcl +# +# The amalgamated FTS2 code will be written into fts2amal.c +# + +# Open the output file and write a header comment at the beginning +# of the file. +# +set out [open fts2amal.c w] +set today [clock format [clock seconds] -format "%Y-%m-%d %H:%M:%S UTC" -gmt 1] +puts $out [subst \ +{/****************************************************************************** +** This file is an amalgamation of separate C source files from the SQLite +** Full Text Search extension 2 (fts2). By combining all the individual C +** code files into this single large file, the entire code can be compiled +** as a one translation unit. This allows many compilers to do optimizations +** that would not be possible if the files were compiled separately. It also +** makes the code easier to import into other projects. +** +** This amalgamation was generated on $today. +*/}] + +# These are the header files used by FTS2. The first time any of these +# files are seen in a #include statement in the C code, include the complete +# text of the file in-line. The file only needs to be included once. +# +foreach hdr { + fts2.h + fts2_hash.h + fts2_tokenizer.h + sqlite3.h + sqlite3ext.h +} { + set available_hdr($hdr) 1 +} + +# 78 stars used for comment formatting. +set s78 \ +{*****************************************************************************} + +# Insert a comment into the code +# +proc section_comment {text} { + global out s78 + set n [string length $text] + set nstar [expr {60 - $n}] + set stars [string range $s78 0 $nstar] + puts $out "/************** $text $stars/" +} + +# Read the source file named $filename and write it into the +# sqlite3.c output file. If any #include statements are seen, +# process them approprately. +# +proc copy_file {filename} { + global seen_hdr available_hdr out + set tail [file tail $filename] + section_comment "Begin file $tail" + set in [open $filename r] + while {![eof $in]} { + set line [gets $in] + if {[regexp {^#\s*include\s+["<]([^">]+)[">]} $line all hdr]} { + if {[info exists available_hdr($hdr)]} { + if {$available_hdr($hdr)} { + section_comment "Include $hdr in the middle of $tail" + copy_file tsrc/$hdr + section_comment "Continuing where we left off in $tail" + } + } elseif {![info exists seen_hdr($hdr)]} { + set seen_hdr($hdr) 1 + puts $out $line + } + } elseif {[regexp {^#ifdef __cplusplus} $line]} { + puts $out "#if 0" + } elseif {[regexp {^#line} $line]} { + # Skip #line directives. + } else { + puts $out $line + } + } + close $in + section_comment "End of $tail" +} + + +# Process the source files. Process files containing commonly +# used subroutines first in order to help the compiler find +# inlining opportunities. +# +foreach file { + fts2.c + fts2_hash.c + fts2_porter.c + fts2_tokenizer.c + fts2_tokenizer1.c + fts2_icu.c +} { + copy_file tsrc/$file +} + +close $out diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/README.tokenizers b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/README.tokenizers new file mode 100644 index 0000000000..f214b24552 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/README.tokenizers @@ -0,0 +1,134 @@ + +1. 
FTS3 Tokenizers + + When creating a new full-text table, FTS3 allows the user to select + the text tokenizer implementation to be used when indexing text + by specifying a "tokenizer" clause as part of the CREATE VIRTUAL TABLE + statement: + + CREATE VIRTUAL TABLE USING fts3( + [, tokenizer []] + ); + + The built-in tokenizers (valid values to pass as ) are + "simple" and "porter". + + should consist of zero or more white-space separated + arguments to pass to the selected tokenizer implementation. The + interpretation of the arguments, if any, depends on the individual + tokenizer. + +2. Custom Tokenizers + + FTS3 allows users to provide custom tokenizer implementations. The + interface used to create a new tokenizer is defined and described in + the fts3_tokenizer.h source file. + + Registering a new FTS3 tokenizer is similar to registering a new + virtual table module with SQLite. The user passes a pointer to a + structure containing pointers to various callback functions that + make up the implementation of the new tokenizer type. For tokenizers, + the structure (defined in fts3_tokenizer.h) is called + "sqlite3_tokenizer_module". + + FTS3 does not expose a C-function that users call to register new + tokenizer types with a database handle. Instead, the pointer must + be encoded as an SQL blob value and passed to FTS3 through the SQL + engine by evaluating a special scalar function, "fts3_tokenizer()". + The fts3_tokenizer() function may be called with one or two arguments, + as follows: + + SELECT fts3_tokenizer(); + SELECT fts3_tokenizer(, ); + + Where is a string identifying the tokenizer and + is a pointer to an sqlite3_tokenizer_module + structure encoded as an SQL blob. If the second argument is present, + it is registered as tokenizer and a copy of it + returned. If only one argument is passed, a pointer to the tokenizer + implementation currently registered as is returned, + encoded as a blob. Or, if no such tokenizer exists, an SQL exception + (error) is raised. + + SECURITY: If the fts3 extension is used in an environment where potentially + malicious users may execute arbitrary SQL (i.e. gears), they should be + prevented from invoking the fts3_tokenizer() function, possibly using the + authorisation callback. + + See "Sample code" below for an example of calling the fts3_tokenizer() + function from C code. + +3. ICU Library Tokenizers + + If this extension is compiled with the SQLITE_ENABLE_ICU pre-processor + symbol defined, then there exists a built-in tokenizer named "icu" + implemented using the ICU library. The first argument passed to the + xCreate() method (see fts3_tokenizer.h) of this tokenizer may be + an ICU locale identifier. For example "tr_TR" for Turkish as used + in Turkey, or "en_AU" for English as used in Australia. For example: + + "CREATE VIRTUAL TABLE thai_text USING fts3(text, tokenizer icu th_TH)" + + The ICU tokenizer implementation is very simple. It splits the input + text according to the ICU rules for finding word boundaries and discards + any tokens that consist entirely of white-space. This may be suitable + for some applications in some locales, but not all. If more complex + processing is required, for example to implement stemming or + discard punctuation, this can be done by creating a tokenizer + implementation that uses the ICU tokenizer as part of it's implementation. + + When using the ICU tokenizer this way, it is safe to overwrite the + contents of the strings returned by the xNext() method (see + fts3_tokenizer.h). + +4. Sample code. 
+ + The following two code samples illustrate the way C code should invoke + the fts3_tokenizer() scalar function: + + int registerTokenizer( + sqlite3 *db, + char *zName, + const sqlite3_tokenizer_module *p + ){ + int rc; + sqlite3_stmt *pStmt; + const char zSql[] = "SELECT fts3_tokenizer(?, ?)"; + + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + + sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); + sqlite3_bind_blob(pStmt, 2, &p, sizeof(p), SQLITE_STATIC); + sqlite3_step(pStmt); + + return sqlite3_finalize(pStmt); + } + + int queryTokenizer( + sqlite3 *db, + char *zName, + const sqlite3_tokenizer_module **pp + ){ + int rc; + sqlite3_stmt *pStmt; + const char zSql[] = "SELECT fts3_tokenizer(?)"; + + *pp = 0; + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + + sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); + if( SQLITE_ROW==sqlite3_step(pStmt) ){ + if( sqlite3_column_type(pStmt, 0)==SQLITE_BLOB ){ + memcpy(pp, sqlite3_column_blob(pStmt, 0), sizeof(*pp)); + } + } + + return sqlite3_finalize(pStmt); + } + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/README.txt b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/README.txt new file mode 100644 index 0000000000..517a2a0434 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/README.txt @@ -0,0 +1,4 @@ +This folder contains source code to the second full-text search +extension for SQLite. While the API is the same, this version uses a +substantially different storage schema from fts1, so tables will need +to be rebuilt. diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3.c b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3.c new file mode 100644 index 0000000000..b39291975a --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3.c @@ -0,0 +1,5971 @@ +/* +** 2006 Oct 10 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This is an SQLite module implementing full-text search. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS3 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS3 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). +*/ + +/* TODO(shess) Consider exporting this comment to an HTML file or the +** wiki. +*/ +/* The full-text index is stored in a series of b+tree (-like) +** structures called segments which map terms to doclists. The +** structures are like b+trees in layout, but are constructed from the +** bottom up in optimal fashion and are not updatable. Since trees +** are built from the bottom up, things will be described from the +** bottom up. +** +** +**** Varints **** +** The basic unit of encoding is a variable-length integer called a +** varint. We encode variable-length integers in little-endian order +** using seven bits * per byte as follows: +** +** KEY: +** A = 0xxxxxxx 7 bits of data and one flag bit +** B = 1xxxxxxx 7 bits of data and one flag bit +** +** 7 bits - A +** 14 bits - BA +** 21 bits - BBA +** and so on. +** +** This is identical to how sqlite encodes varints (see util.c). 
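+**
+** (Worked example, added for clarity: the value 300 (0x12C) needs nine
+** bits, so it is stored in two bytes, least-significant seven bits
+** first:
+**
+**     byte 0: 0xAC (1 0101100, flag bit set, low 7 bits of the value)
+**     byte 1: 0x02 (0 0000010, flag bit clear, remaining bits)
+**
+** Decoding accumulates 0x2C + (0x02 << 7) = 300.)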
+** +** +**** Document lists **** +** A doclist (document list) holds a docid-sorted list of hits for a +** given term. Doclists hold docids, and can optionally associate +** token positions and offsets with docids. +** +** A DL_POSITIONS_OFFSETS doclist is stored like this: +** +** array { +** varint docid; +** array { (position list for column 0) +** varint position; (delta from previous position plus POS_BASE) +** varint startOffset; (delta from previous startOffset) +** varint endOffset; (delta from startOffset) +** } +** array { +** varint POS_COLUMN; (marks start of position list for new column) +** varint column; (index of new column) +** array { +** varint position; (delta from previous position plus POS_BASE) +** varint startOffset;(delta from previous startOffset) +** varint endOffset; (delta from startOffset) +** } +** } +** varint POS_END; (marks end of positions for this document. +** } +** +** Here, array { X } means zero or more occurrences of X, adjacent in +** memory. A "position" is an index of a token in the token stream +** generated by the tokenizer, while an "offset" is a byte offset, +** both based at 0. Note that POS_END and POS_COLUMN occur in the +** same logical place as the position element, and act as sentinals +** ending a position list array. +** +** A DL_POSITIONS doclist omits the startOffset and endOffset +** information. A DL_DOCIDS doclist omits both the position and +** offset information, becoming an array of varint-encoded docids. +** +** On-disk data is stored as type DL_DEFAULT, so we don't serialize +** the type. Due to how deletion is implemented in the segmentation +** system, on-disk doclists MUST store at least positions. +** +** +**** Segment leaf nodes **** +** Segment leaf nodes store terms and doclists, ordered by term. Leaf +** nodes are written using LeafWriter, and read using LeafReader (to +** iterate through a single leaf node's data) and LeavesReader (to +** iterate through a segment's entire leaf layer). Leaf nodes have +** the format: +** +** varint iHeight; (height from leaf level, always 0) +** varint nTerm; (length of first term) +** char pTerm[nTerm]; (content of first term) +** varint nDoclist; (length of term's associated doclist) +** char pDoclist[nDoclist]; (content of doclist) +** array { +** (further terms are delta-encoded) +** varint nPrefix; (length of prefix shared with previous term) +** varint nSuffix; (length of unshared suffix) +** char pTermSuffix[nSuffix];(unshared suffix of next term) +** varint nDoclist; (length of term's associated doclist) +** char pDoclist[nDoclist]; (content of doclist) +** } +** +** Here, array { X } means zero or more occurrences of X, adjacent in +** memory. +** +** Leaf nodes are broken into blocks which are stored contiguously in +** the %_segments table in sorted order. This means that when the end +** of a node is reached, the next term is in the node with the next +** greater node id. +** +** New data is spilled to a new leaf node when the current node +** exceeds LEAF_MAX bytes (default 2048). New data which itself is +** larger than STANDALONE_MIN (default 1024) is placed in a standalone +** node (a leaf node with a single term and doclist). The goal of +** these settings is to pack together groups of small doclists while +** making it efficient to directly access large doclists. The +** assumption is that large doclists represent terms which are more +** likely to be query targets. +** +** TODO(shess) It may be useful for blocking decisions to be more +** dynamic. 
For instance, it may make more sense to have a 2.5k leaf +** node rather than splitting into 2k and .5k nodes. My intuition is +** that this might extend through 2x or 4x the pagesize. +** +** +**** Segment interior nodes **** +** Segment interior nodes store blockids for subtree nodes and terms +** to describe what data is stored by the each subtree. Interior +** nodes are written using InteriorWriter, and read using +** InteriorReader. InteriorWriters are created as needed when +** SegmentWriter creates new leaf nodes, or when an interior node +** itself grows too big and must be split. The format of interior +** nodes: +** +** varint iHeight; (height from leaf level, always >0) +** varint iBlockid; (block id of node's leftmost subtree) +** optional { +** varint nTerm; (length of first term) +** char pTerm[nTerm]; (content of first term) +** array { +** (further terms are delta-encoded) +** varint nPrefix; (length of shared prefix with previous term) +** varint nSuffix; (length of unshared suffix) +** char pTermSuffix[nSuffix]; (unshared suffix of next term) +** } +** } +** +** Here, optional { X } means an optional element, while array { X } +** means zero or more occurrences of X, adjacent in memory. +** +** An interior node encodes n terms separating n+1 subtrees. The +** subtree blocks are contiguous, so only the first subtree's blockid +** is encoded. The subtree at iBlockid will contain all terms less +** than the first term encoded (or all terms if no term is encoded). +** Otherwise, for terms greater than or equal to pTerm[i] but less +** than pTerm[i+1], the subtree for that term will be rooted at +** iBlockid+i. Interior nodes only store enough term data to +** distinguish adjacent children (if the rightmost term of the left +** child is "something", and the leftmost term of the right child is +** "wicked", only "w" is stored). +** +** New data is spilled to a new interior node at the same height when +** the current node exceeds INTERIOR_MAX bytes (default 2048). +** INTERIOR_MIN_TERMS (default 7) keeps large terms from monopolizing +** interior nodes and making the tree too skinny. The interior nodes +** at a given height are naturally tracked by interior nodes at +** height+1, and so on. +** +** +**** Segment directory **** +** The segment directory in table %_segdir stores meta-information for +** merging and deleting segments, and also the root node of the +** segment's tree. +** +** The root node is the top node of the segment's tree after encoding +** the entire segment, restricted to ROOT_MAX bytes (default 1024). +** This could be either a leaf node or an interior node. If the top +** node requires more than ROOT_MAX bytes, it is flushed to %_segments +** and a new root interior node is generated (which should always fit +** within ROOT_MAX because it only needs space for 2 varints, the +** height and the blockid of the previous root). +** +** The meta-information in the segment directory is: +** level - segment level (see below) +** idx - index within level +** - (level,idx uniquely identify a segment) +** start_block - first leaf node +** leaves_end_block - last leaf node +** end_block - last block (including interior nodes) +** root - contents of root node +** +** If the root node is a leaf node, then start_block, +** leaves_end_block, and end_block are all 0. +** +** +**** Segment merging **** +** To amortize update costs, segments are groups into levels and +** merged in matches. Each increase in level represents exponentially +** more documents. 
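+** (For concreteness, with the default MERGE_COUNT of 16 defined later
+** in this file, a level-1 segment stands for up to 16 level-0 writes,
+** a level-2 segment for up to 16*16 = 256, a level-3 segment for up to
+** 4096, and so on; the mechanism is described in the next paragraph.)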
+** +** New documents (actually, document updates) are tokenized and +** written individually (using LeafWriter) to a level 0 segment, with +** incrementing idx. When idx reaches MERGE_COUNT (default 16), all +** level 0 segments are merged into a single level 1 segment. Level 1 +** is populated like level 0, and eventually MERGE_COUNT level 1 +** segments are merged to a single level 2 segment (representing +** MERGE_COUNT^2 updates), and so on. +** +** A segment merge traverses all segments at a given level in +** parallel, performing a straightforward sorted merge. Since segment +** leaf nodes are written in to the %_segments table in order, this +** merge traverses the underlying sqlite disk structures efficiently. +** After the merge, all segment blocks from the merged level are +** deleted. +** +** MERGE_COUNT controls how often we merge segments. 16 seems to be +** somewhat of a sweet spot for insertion performance. 32 and 64 show +** very similar performance numbers to 16 on insertion, though they're +** a tiny bit slower (perhaps due to more overhead in merge-time +** sorting). 8 is about 20% slower than 16, 4 about 50% slower than +** 16, 2 about 66% slower than 16. +** +** At query time, high MERGE_COUNT increases the number of segments +** which need to be scanned and merged. For instance, with 100k docs +** inserted: +** +** MERGE_COUNT segments +** 16 25 +** 8 12 +** 4 10 +** 2 6 +** +** This appears to have only a moderate impact on queries for very +** frequent terms (which are somewhat dominated by segment merge +** costs), and infrequent and non-existent terms still seem to be fast +** even with many segments. +** +** TODO(shess) That said, it would be nice to have a better query-side +** argument for MERGE_COUNT of 16. Also, it's possible/likely that +** optimizations to things like doclist merging will swing the sweet +** spot around. +** +** +** +**** Handling of deletions and updates **** +** Since we're using a segmented structure, with no docid-oriented +** index into the term index, we clearly cannot simply update the term +** index when a document is deleted or updated. For deletions, we +** write an empty doclist (varint(docid) varint(POS_END)), for updates +** we simply write the new doclist. Segment merges overwrite older +** data for a particular docid with newer data, so deletes or updates +** will eventually overtake the earlier data and knock it out. The +** query logic likewise merges doclists so that newer data knocks out +** older data. +** +** TODO(shess) Provide a VACUUM type operation to clear out all +** deletions and duplications. This would basically be a forced merge +** into a single segment. +*/ + +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) + +#if defined(SQLITE_ENABLE_FTS3) && !defined(SQLITE_CORE) +# define SQLITE_CORE 1 +#endif + +#include +#include +#include +#include +#include + +#include "fts3.h" +#include "fts3_hash.h" +#include "fts3_tokenizer.h" +#include "sqlite3.h" +#include "sqlite3ext.h" +SQLITE_EXTENSION_INIT1 + + +/* TODO(shess) MAN, this thing needs some refactoring. At minimum, it +** would be nice to order the file better, perhaps something along the +** lines of: +** +** - utility functions +** - table setup functions +** - table update functions +** - table query functions +** +** Put the query functions last because they're likely to reference +** typedefs or functions from the table update section. 
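+**
+** (Worked example, added for clarity: per the "Handling of deletions"
+** notes above, deleting the document with docid 7 appends the empty
+** doclist varint(7) varint(POS_END), i.e. the two bytes 0x07 0x00,
+** which later merges use to knock out the older entries for that
+** docid.)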
+*/ + +#if 0 +# define TRACE(A) printf A; fflush(stdout) +#else +# define TRACE(A) +#endif + +/* It is not safe to call isspace(), tolower(), or isalnum() on +** hi-bit-set characters. This is the same solution used in the +** tokenizer. +*/ +/* TODO(shess) The snippet-generation code should be using the +** tokenizer-generated tokens rather than doing its own local +** tokenization. +*/ +/* TODO(shess) Is __isascii() a portable version of (c&0x80)==0? */ +static int safe_isspace(char c){ + return (c&0x80)==0 ? isspace(c) : 0; +} +static int safe_tolower(char c){ + return (c&0x80)==0 ? tolower(c) : c; +} +static int safe_isalnum(char c){ + return (c&0x80)==0 ? isalnum(c) : 0; +} + +typedef enum DocListType { + DL_DOCIDS, /* docids only */ + DL_POSITIONS, /* docids + positions */ + DL_POSITIONS_OFFSETS /* docids + positions + offsets */ +} DocListType; + +/* +** By default, only positions and not offsets are stored in the doclists. +** To change this so that offsets are stored too, compile with +** +** -DDL_DEFAULT=DL_POSITIONS_OFFSETS +** +** If DL_DEFAULT is set to DL_DOCIDS, your table can only be inserted +** into (no deletes or updates). +*/ +#ifndef DL_DEFAULT +# define DL_DEFAULT DL_POSITIONS +#endif + +enum { + POS_END = 0, /* end of this position list */ + POS_COLUMN, /* followed by new column number */ + POS_BASE +}; + +/* MERGE_COUNT controls how often we merge segments (see comment at +** top of file). +*/ +#define MERGE_COUNT 16 + +/* utility functions */ + +/* CLEAR() and SCRAMBLE() abstract memset() on a pointer to a single +** record to prevent errors of the form: +** +** my_function(SomeType *b){ +** memset(b, '\0', sizeof(b)); // sizeof(b)!=sizeof(*b) +** } +*/ +/* TODO(shess) Obvious candidates for a header file. */ +#define CLEAR(b) memset(b, '\0', sizeof(*(b))) + +#ifndef NDEBUG +# define SCRAMBLE(b) memset(b, 0x55, sizeof(*(b))) +#else +# define SCRAMBLE(b) +#endif + +/* We may need up to VARINT_MAX bytes to store an encoded 64-bit integer. */ +#define VARINT_MAX 10 + +/* Write a 64-bit variable-length integer to memory starting at p[0]. + * The length of data written will be between 1 and VARINT_MAX bytes. + * The number of bytes written is returned. */ +static int putVarint(char *p, sqlite_int64 v){ + unsigned char *q = (unsigned char *) p; + sqlite_uint64 vu = v; + do{ + *q++ = (unsigned char) ((vu & 0x7f) | 0x80); + vu >>= 7; + }while( vu!=0 ); + q[-1] &= 0x7f; /* turn off high bit in final byte */ + assert( q - (unsigned char *)p <= VARINT_MAX ); + return (int) (q - (unsigned char *)p); +} + +/* Read a 64-bit variable-length integer from memory starting at p[0]. + * Return the number of bytes read, or 0 on error. + * The value is stored in *v. */ +static int getVarint(const char *p, sqlite_int64 *v){ + const unsigned char *q = (const unsigned char *) p; + sqlite_uint64 x = 0, y = 1; + while( (*q & 0x80) == 0x80 ){ + x += y * (*q++ & 0x7f); + y <<= 7; + if( q - (unsigned char *)p >= VARINT_MAX ){ /* bad data */ + assert( 0 ); + return 0; + } + } + x += y * (*q++); + *v = (sqlite_int64) x; + return (int) (q - (unsigned char *)p); +} + +static int getVarint32(const char *p, int *pi){ + sqlite_int64 i; + int ret = getVarint(p, &i); + *pi = (int) i; + assert( *pi==i ); + return ret; +} + +/*******************************************************************/ +/* DataBuffer is used to collect data into a buffer in piecemeal +** fashion. It implements the usual distinction between amount of +** data currently stored (nData) and buffer capacity (nCapacity). 
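+**
+** A typical usage pattern, as an illustrative sketch (the buffer and
+** the payloads here are made up; the entry points are listed below):
+**
+**   DataBuffer buf;
+**   dataBufferInit(&buf, 0);                    // no initial allocation
+**   dataBufferAppend(&buf, "abc", 3);           // nData==3
+**   dataBufferAppend2(&buf, "de", 2, "f", 1);   // nData==6
+**   // ... read buf.pData[0..buf.nData-1] ...
+**   dataBufferDestroy(&buf);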
+** +** dataBufferInit - create a buffer with given initial capacity. +** dataBufferReset - forget buffer's data, retaining capacity. +** dataBufferDestroy - free buffer's data. +** dataBufferExpand - expand capacity without adding data. +** dataBufferAppend - append data. +** dataBufferAppend2 - append two pieces of data at once. +** dataBufferReplace - replace buffer's data. +*/ +typedef struct DataBuffer { + char *pData; /* Pointer to malloc'ed buffer. */ + int nCapacity; /* Size of pData buffer. */ + int nData; /* End of data loaded into pData. */ +} DataBuffer; + +static void dataBufferInit(DataBuffer *pBuffer, int nCapacity){ + assert( nCapacity>=0 ); + pBuffer->nData = 0; + pBuffer->nCapacity = nCapacity; + pBuffer->pData = nCapacity==0 ? NULL : malloc(nCapacity); +} +static void dataBufferReset(DataBuffer *pBuffer){ + pBuffer->nData = 0; +} +static void dataBufferDestroy(DataBuffer *pBuffer){ + if( pBuffer->pData!=NULL ) free(pBuffer->pData); + SCRAMBLE(pBuffer); +} +static void dataBufferExpand(DataBuffer *pBuffer, int nAddCapacity){ + assert( nAddCapacity>0 ); + /* TODO(shess) Consider expanding more aggressively. Note that the + ** underlying malloc implementation may take care of such things for + ** us already. + */ + if( pBuffer->nData+nAddCapacity>pBuffer->nCapacity ){ + pBuffer->nCapacity = pBuffer->nData+nAddCapacity; + pBuffer->pData = realloc(pBuffer->pData, pBuffer->nCapacity); + } +} +static void dataBufferAppend(DataBuffer *pBuffer, + const char *pSource, int nSource){ + assert( nSource>0 && pSource!=NULL ); + dataBufferExpand(pBuffer, nSource); + memcpy(pBuffer->pData+pBuffer->nData, pSource, nSource); + pBuffer->nData += nSource; +} +static void dataBufferAppend2(DataBuffer *pBuffer, + const char *pSource1, int nSource1, + const char *pSource2, int nSource2){ + assert( nSource1>0 && pSource1!=NULL ); + assert( nSource2>0 && pSource2!=NULL ); + dataBufferExpand(pBuffer, nSource1+nSource2); + memcpy(pBuffer->pData+pBuffer->nData, pSource1, nSource1); + memcpy(pBuffer->pData+pBuffer->nData+nSource1, pSource2, nSource2); + pBuffer->nData += nSource1+nSource2; +} +static void dataBufferReplace(DataBuffer *pBuffer, + const char *pSource, int nSource){ + dataBufferReset(pBuffer); + dataBufferAppend(pBuffer, pSource, nSource); +} + +/* StringBuffer is a null-terminated version of DataBuffer. */ +typedef struct StringBuffer { + DataBuffer b; /* Includes null terminator. */ +} StringBuffer; + +static void initStringBuffer(StringBuffer *sb){ + dataBufferInit(&sb->b, 100); + dataBufferReplace(&sb->b, "", 1); +} +static int stringBufferLength(StringBuffer *sb){ + return sb->b.nData-1; +} +static char *stringBufferData(StringBuffer *sb){ + return sb->b.pData; +} +static void stringBufferDestroy(StringBuffer *sb){ + dataBufferDestroy(&sb->b); +} + +static void nappend(StringBuffer *sb, const char *zFrom, int nFrom){ + assert( sb->b.nData>0 ); + if( nFrom>0 ){ + sb->b.nData--; + dataBufferAppend2(&sb->b, zFrom, nFrom, "", 1); + } +} +static void append(StringBuffer *sb, const char *zFrom){ + nappend(sb, zFrom, strlen(zFrom)); +} + +/* Append a list of strings separated by commas. 
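+** For example, with a hypothetical azColumn = {"c0subject", "c1body"},
+** appendList(sb, 2, azColumn) appends the text "c0subject, c1body".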
*/ +static void appendList(StringBuffer *sb, int nString, char **azString){ + int i; + for(i=0; i0 ) append(sb, ", "); + append(sb, azString[i]); + } +} + +static int endsInWhiteSpace(StringBuffer *p){ + return stringBufferLength(p)>0 && + safe_isspace(stringBufferData(p)[stringBufferLength(p)-1]); +} + +/* If the StringBuffer ends in something other than white space, add a +** single space character to the end. +*/ +static void appendWhiteSpace(StringBuffer *p){ + if( stringBufferLength(p)==0 ) return; + if( !endsInWhiteSpace(p) ) append(p, " "); +} + +/* Remove white space from the end of the StringBuffer */ +static void trimWhiteSpace(StringBuffer *p){ + while( endsInWhiteSpace(p) ){ + p->b.pData[--p->b.nData-1] = '\0'; + } +} + +/*******************************************************************/ +/* DLReader is used to read document elements from a doclist. The +** current docid is cached, so dlrDocid() is fast. DLReader does not +** own the doclist buffer. +** +** dlrAtEnd - true if there's no more data to read. +** dlrDocid - docid of current document. +** dlrDocData - doclist data for current document (including docid). +** dlrDocDataBytes - length of same. +** dlrAllDataBytes - length of all remaining data. +** dlrPosData - position data for current document. +** dlrPosDataLen - length of pos data for current document (incl POS_END). +** dlrStep - step to current document. +** dlrInit - initial for doclist of given type against given data. +** dlrDestroy - clean up. +** +** Expected usage is something like: +** +** DLReader reader; +** dlrInit(&reader, pData, nData); +** while( !dlrAtEnd(&reader) ){ +** // calls to dlrDocid() and kin. +** dlrStep(&reader); +** } +** dlrDestroy(&reader); +*/ +typedef struct DLReader { + DocListType iType; + const char *pData; + int nData; + + sqlite_int64 iDocid; + int nElement; +} DLReader; + +static int dlrAtEnd(DLReader *pReader){ + assert( pReader->nData>=0 ); + return pReader->nData==0; +} +static sqlite_int64 dlrDocid(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + return pReader->iDocid; +} +static const char *dlrDocData(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + return pReader->pData; +} +static int dlrDocDataBytes(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + return pReader->nElement; +} +static int dlrAllDataBytes(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + return pReader->nData; +} +/* TODO(shess) Consider adding a field to track iDocid varint length +** to make these two functions faster. This might matter (a tiny bit) +** for queries. +*/ +static const char *dlrPosData(DLReader *pReader){ + sqlite_int64 iDummy; + int n = getVarint(pReader->pData, &iDummy); + assert( !dlrAtEnd(pReader) ); + return pReader->pData+n; +} +static int dlrPosDataLen(DLReader *pReader){ + sqlite_int64 iDummy; + int n = getVarint(pReader->pData, &iDummy); + assert( !dlrAtEnd(pReader) ); + return pReader->nElement-n; +} +static void dlrStep(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + + /* Skip past current doclist element. */ + assert( pReader->nElement<=pReader->nData ); + pReader->pData += pReader->nElement; + pReader->nData -= pReader->nElement; + + /* If there is more data, read the next doclist element. 
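+  ** A non-empty element is laid out as varint(docid delta) followed,
+  ** for DL_POSITIONS and DL_POSITIONS_OFFSETS doclists, by the
+  ** position data for that docid, terminated by varint(POS_END).
+  ** (Illustrative example: docids 5, 9 and 30 are stored as the
+  ** deltas 5, 4 and 21.)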
*/ + if( pReader->nData!=0 ){ + sqlite_int64 iDocidDelta; + int iDummy, n = getVarint(pReader->pData, &iDocidDelta); + pReader->iDocid += iDocidDelta; + if( pReader->iType>=DL_POSITIONS ){ + assert( nnData ); + while( 1 ){ + n += getVarint32(pReader->pData+n, &iDummy); + assert( n<=pReader->nData ); + if( iDummy==POS_END ) break; + if( iDummy==POS_COLUMN ){ + n += getVarint32(pReader->pData+n, &iDummy); + assert( nnData ); + }else if( pReader->iType==DL_POSITIONS_OFFSETS ){ + n += getVarint32(pReader->pData+n, &iDummy); + n += getVarint32(pReader->pData+n, &iDummy); + assert( nnData ); + } + } + } + pReader->nElement = n; + assert( pReader->nElement<=pReader->nData ); + } +} +static void dlrInit(DLReader *pReader, DocListType iType, + const char *pData, int nData){ + assert( pData!=NULL && nData!=0 ); + pReader->iType = iType; + pReader->pData = pData; + pReader->nData = nData; + pReader->nElement = 0; + pReader->iDocid = 0; + + /* Load the first element's data. There must be a first element. */ + dlrStep(pReader); +} +static void dlrDestroy(DLReader *pReader){ + SCRAMBLE(pReader); +} + +#ifndef NDEBUG +/* Verify that the doclist can be validly decoded. Also returns the +** last docid found because it's convenient in other assertions for +** DLWriter. +*/ +static void docListValidate(DocListType iType, const char *pData, int nData, + sqlite_int64 *pLastDocid){ + sqlite_int64 iPrevDocid = 0; + assert( nData>0 ); + assert( pData!=0 ); + assert( pData+nData>pData ); + while( nData!=0 ){ + sqlite_int64 iDocidDelta; + int n = getVarint(pData, &iDocidDelta); + iPrevDocid += iDocidDelta; + if( iType>DL_DOCIDS ){ + int iDummy; + while( 1 ){ + n += getVarint32(pData+n, &iDummy); + if( iDummy==POS_END ) break; + if( iDummy==POS_COLUMN ){ + n += getVarint32(pData+n, &iDummy); + }else if( iType>DL_POSITIONS ){ + n += getVarint32(pData+n, &iDummy); + n += getVarint32(pData+n, &iDummy); + } + assert( n<=nData ); + } + } + assert( n<=nData ); + pData += n; + nData -= n; + } + if( pLastDocid ) *pLastDocid = iPrevDocid; +} +#define ASSERT_VALID_DOCLIST(i, p, n, o) docListValidate(i, p, n, o) +#else +#define ASSERT_VALID_DOCLIST(i, p, n, o) assert( 1 ) +#endif + +/*******************************************************************/ +/* DLWriter is used to write doclist data to a DataBuffer. DLWriter +** always appends to the buffer and does not own it. +** +** dlwInit - initialize to write a given type doclistto a buffer. +** dlwDestroy - clear the writer's memory. Does not free buffer. +** dlwAppend - append raw doclist data to buffer. +** dlwCopy - copy next doclist from reader to writer. +** dlwAdd - construct doclist element and append to buffer. +** Only apply dlwAdd() to DL_DOCIDS doclists (else use PLWriter). +*/ +typedef struct DLWriter { + DocListType iType; + DataBuffer *b; + sqlite_int64 iPrevDocid; +#ifndef NDEBUG + int has_iPrevDocid; +#endif +} DLWriter; + +static void dlwInit(DLWriter *pWriter, DocListType iType, DataBuffer *b){ + pWriter->b = b; + pWriter->iType = iType; + pWriter->iPrevDocid = 0; +#ifndef NDEBUG + pWriter->has_iPrevDocid = 0; +#endif +} +static void dlwDestroy(DLWriter *pWriter){ + SCRAMBLE(pWriter); +} +/* iFirstDocid is the first docid in the doclist in pData. It is +** needed because pData may point within a larger doclist, in which +** case the first item would be delta-encoded. +** +** iLastDocid is the final docid in the doclist in pData. It is +** needed to create the new iPrevDocid for future delta-encoding. 
The +** code could decode the passed doclist to recreate iLastDocid, but +** the only current user (docListMerge) already has decoded this +** information. +*/ +/* TODO(shess) This has become just a helper for docListMerge. +** Consider a refactor to make this cleaner. +*/ +static void dlwAppend(DLWriter *pWriter, + const char *pData, int nData, + sqlite_int64 iFirstDocid, sqlite_int64 iLastDocid){ + sqlite_int64 iDocid = 0; + char c[VARINT_MAX]; + int nFirstOld, nFirstNew; /* Old and new varint len of first docid. */ +#ifndef NDEBUG + sqlite_int64 iLastDocidDelta; +#endif + + /* Recode the initial docid as delta from iPrevDocid. */ + nFirstOld = getVarint(pData, &iDocid); + assert( nFirstOldiType==DL_DOCIDS) ); + nFirstNew = putVarint(c, iFirstDocid-pWriter->iPrevDocid); + + /* Verify that the incoming doclist is valid AND that it ends with + ** the expected docid. This is essential because we'll trust this + ** docid in future delta-encoding. + */ + ASSERT_VALID_DOCLIST(pWriter->iType, pData, nData, &iLastDocidDelta); + assert( iLastDocid==iFirstDocid-iDocid+iLastDocidDelta ); + + /* Append recoded initial docid and everything else. Rest of docids + ** should have been delta-encoded from previous initial docid. + */ + if( nFirstOldb, c, nFirstNew, + pData+nFirstOld, nData-nFirstOld); + }else{ + dataBufferAppend(pWriter->b, c, nFirstNew); + } + pWriter->iPrevDocid = iLastDocid; +} +static void dlwCopy(DLWriter *pWriter, DLReader *pReader){ + dlwAppend(pWriter, dlrDocData(pReader), dlrDocDataBytes(pReader), + dlrDocid(pReader), dlrDocid(pReader)); +} +static void dlwAdd(DLWriter *pWriter, sqlite_int64 iDocid){ + char c[VARINT_MAX]; + int n = putVarint(c, iDocid-pWriter->iPrevDocid); + + /* Docids must ascend. */ + assert( !pWriter->has_iPrevDocid || iDocid>pWriter->iPrevDocid ); + assert( pWriter->iType==DL_DOCIDS ); + + dataBufferAppend(pWriter->b, c, n); + pWriter->iPrevDocid = iDocid; +#ifndef NDEBUG + pWriter->has_iPrevDocid = 1; +#endif +} + +/*******************************************************************/ +/* PLReader is used to read data from a document's position list. As +** the caller steps through the list, data is cached so that varints +** only need to be decoded once. +** +** plrInit, plrDestroy - create/destroy a reader. +** plrColumn, plrPosition, plrStartOffset, plrEndOffset - accessors +** plrAtEnd - at end of stream, only call plrDestroy once true. +** plrStep - step to the next element. +*/ +typedef struct PLReader { + /* These refer to the next position's data. nData will reach 0 when + ** reading the last position, so plrStep() signals EOF by setting + ** pData to NULL. 
+ */ + const char *pData; + int nData; + + DocListType iType; + int iColumn; /* the last column read */ + int iPosition; /* the last position read */ + int iStartOffset; /* the last start offset read */ + int iEndOffset; /* the last end offset read */ +} PLReader; + +static int plrAtEnd(PLReader *pReader){ + return pReader->pData==NULL; +} +static int plrColumn(PLReader *pReader){ + assert( !plrAtEnd(pReader) ); + return pReader->iColumn; +} +static int plrPosition(PLReader *pReader){ + assert( !plrAtEnd(pReader) ); + return pReader->iPosition; +} +static int plrStartOffset(PLReader *pReader){ + assert( !plrAtEnd(pReader) ); + return pReader->iStartOffset; +} +static int plrEndOffset(PLReader *pReader){ + assert( !plrAtEnd(pReader) ); + return pReader->iEndOffset; +} +static void plrStep(PLReader *pReader){ + int i, n; + + assert( !plrAtEnd(pReader) ); + + if( pReader->nData==0 ){ + pReader->pData = NULL; + return; + } + + n = getVarint32(pReader->pData, &i); + if( i==POS_COLUMN ){ + n += getVarint32(pReader->pData+n, &pReader->iColumn); + pReader->iPosition = 0; + pReader->iStartOffset = 0; + n += getVarint32(pReader->pData+n, &i); + } + /* Should never see adjacent column changes. */ + assert( i!=POS_COLUMN ); + + if( i==POS_END ){ + pReader->nData = 0; + pReader->pData = NULL; + return; + } + + pReader->iPosition += i-POS_BASE; + if( pReader->iType==DL_POSITIONS_OFFSETS ){ + n += getVarint32(pReader->pData+n, &i); + pReader->iStartOffset += i; + n += getVarint32(pReader->pData+n, &i); + pReader->iEndOffset = pReader->iStartOffset+i; + } + assert( n<=pReader->nData ); + pReader->pData += n; + pReader->nData -= n; +} + +static void plrInit(PLReader *pReader, DLReader *pDLReader){ + pReader->pData = dlrPosData(pDLReader); + pReader->nData = dlrPosDataLen(pDLReader); + pReader->iType = pDLReader->iType; + pReader->iColumn = 0; + pReader->iPosition = 0; + pReader->iStartOffset = 0; + pReader->iEndOffset = 0; + plrStep(pReader); +} +static void plrDestroy(PLReader *pReader){ + SCRAMBLE(pReader); +} + +/*******************************************************************/ +/* PLWriter is used in constructing a document's position list. As a +** convenience, if iType is DL_DOCIDS, PLWriter becomes a no-op. +** PLWriter writes to the associated DLWriter's buffer. +** +** plwInit - init for writing a document's poslist. +** plwDestroy - clear a writer. +** plwAdd - append position and offset information. +** plwCopy - copy next position's data from reader to writer. +** plwTerminate - add any necessary doclist terminator. +** +** Calling plwAdd() after plwTerminate() may result in a corrupt +** doclist. +*/ +/* TODO(shess) Until we've written the second item, we can cache the +** first item's information. Then we'd have three states: +** +** - initialized with docid, no positions. +** - docid and one position. +** - docid and multiple positions. +** +** Only the last state needs to actually write to dlw->b, which would +** be an improvement in the DLCollector case. +*/ +typedef struct PLWriter { + DLWriter *dlw; + + int iColumn; /* the last column written */ + int iPos; /* the last position written */ + int iOffset; /* the last start offset written */ +} PLWriter; + +/* TODO(shess) In the case where the parent is reading these values +** from a PLReader, we could optimize to a copy if that PLReader has +** the same type as pWriter. 
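+**
+** For reference, an illustrative encoding produced by plwAdd() and
+** plwTerminate() below, assuming a DL_POSITIONS doclist: positions 3
+** and 7 in column 0 followed by position 2 in column 2 come out as
+** the varints
+**
+**   5 6 1 2 4 0
+**
+** that is POS_BASE+3, POS_BASE+4, POS_COLUMN, 2, POS_BASE+2, POS_END;
+** positions are delta-encoded within a column and the position
+** counter resets on each column change.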
+*/ +static void plwAdd(PLWriter *pWriter, int iColumn, int iPos, + int iStartOffset, int iEndOffset){ + /* Worst-case space for POS_COLUMN, iColumn, iPosDelta, + ** iStartOffsetDelta, and iEndOffsetDelta. + */ + char c[5*VARINT_MAX]; + int n = 0; + + /* Ban plwAdd() after plwTerminate(). */ + assert( pWriter->iPos!=-1 ); + + if( pWriter->dlw->iType==DL_DOCIDS ) return; + + if( iColumn!=pWriter->iColumn ){ + n += putVarint(c+n, POS_COLUMN); + n += putVarint(c+n, iColumn); + pWriter->iColumn = iColumn; + pWriter->iPos = 0; + pWriter->iOffset = 0; + } + assert( iPos>=pWriter->iPos ); + n += putVarint(c+n, POS_BASE+(iPos-pWriter->iPos)); + pWriter->iPos = iPos; + if( pWriter->dlw->iType==DL_POSITIONS_OFFSETS ){ + assert( iStartOffset>=pWriter->iOffset ); + n += putVarint(c+n, iStartOffset-pWriter->iOffset); + pWriter->iOffset = iStartOffset; + assert( iEndOffset>=iStartOffset ); + n += putVarint(c+n, iEndOffset-iStartOffset); + } + dataBufferAppend(pWriter->dlw->b, c, n); +} +static void plwCopy(PLWriter *pWriter, PLReader *pReader){ + plwAdd(pWriter, plrColumn(pReader), plrPosition(pReader), + plrStartOffset(pReader), plrEndOffset(pReader)); +} +static void plwInit(PLWriter *pWriter, DLWriter *dlw, sqlite_int64 iDocid){ + char c[VARINT_MAX]; + int n; + + pWriter->dlw = dlw; + + /* Docids must ascend. */ + assert( !pWriter->dlw->has_iPrevDocid || iDocid>pWriter->dlw->iPrevDocid ); + n = putVarint(c, iDocid-pWriter->dlw->iPrevDocid); + dataBufferAppend(pWriter->dlw->b, c, n); + pWriter->dlw->iPrevDocid = iDocid; +#ifndef NDEBUG + pWriter->dlw->has_iPrevDocid = 1; +#endif + + pWriter->iColumn = 0; + pWriter->iPos = 0; + pWriter->iOffset = 0; +} +/* TODO(shess) Should plwDestroy() also terminate the doclist? But +** then plwDestroy() would no longer be just a destructor, it would +** also be doing work, which isn't consistent with the overall idiom. +** Another option would be for plwAdd() to always append any necessary +** terminator, so that the output is always correct. But that would +** add incremental work to the common case with the only benefit being +** API elegance. Punt for now. +*/ +static void plwTerminate(PLWriter *pWriter){ + if( pWriter->dlw->iType>DL_DOCIDS ){ + char c[VARINT_MAX]; + int n = putVarint(c, POS_END); + dataBufferAppend(pWriter->dlw->b, c, n); + } +#ifndef NDEBUG + /* Mark as terminated for assert in plwAdd(). */ + pWriter->iPos = -1; +#endif +} +static void plwDestroy(PLWriter *pWriter){ + SCRAMBLE(pWriter); +} + +/*******************************************************************/ +/* DLCollector wraps PLWriter and DLWriter to provide a +** dynamically-allocated doclist area to use during tokenization. +** +** dlcNew - malloc up and initialize a collector. +** dlcDelete - destroy a collector and all contained items. +** dlcAddPos - append position and offset information. +** dlcAddDoclist - add the collected doclist to the given buffer. +** dlcNext - terminate the current document and open another. +*/ +typedef struct DLCollector { + DataBuffer b; + DLWriter dlw; + PLWriter plw; +} DLCollector; + +/* TODO(shess) This could also be done by calling plwTerminate() and +** dataBufferAppend(). I tried that, expecting nominal performance +** differences, but it seemed to pretty reliably be worth 1% to code +** it this way. I suspect it's the incremental malloc overhead (some +** percentage of the plwTerminate() calls will cause a realloc), so +** this might be worth revisiting if the DataBuffer implementation +** changes. 
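+**
+** A usage sketch for the collector API (illustrative only; the
+** docid, positions, offsets and the destination buffer "doclist"
+** are made up for the example):
+**
+**   DLCollector *pCollector = dlcNew(iDocid, DL_DEFAULT);
+**   dlcAddPos(pCollector, 0, 0, 0, 5);    // first token, column 0
+**   dlcAddPos(pCollector, 0, 1, 6, 10);   // second token, column 0
+**   dlcAddDoclist(pCollector, &doclist);  // append terminated doclist
+**   dlcDelete(pCollector);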
+*/ +static void dlcAddDoclist(DLCollector *pCollector, DataBuffer *b){ + if( pCollector->dlw.iType>DL_DOCIDS ){ + char c[VARINT_MAX]; + int n = putVarint(c, POS_END); + dataBufferAppend2(b, pCollector->b.pData, pCollector->b.nData, c, n); + }else{ + dataBufferAppend(b, pCollector->b.pData, pCollector->b.nData); + } +} +static void dlcNext(DLCollector *pCollector, sqlite_int64 iDocid){ + plwTerminate(&pCollector->plw); + plwDestroy(&pCollector->plw); + plwInit(&pCollector->plw, &pCollector->dlw, iDocid); +} +static void dlcAddPos(DLCollector *pCollector, int iColumn, int iPos, + int iStartOffset, int iEndOffset){ + plwAdd(&pCollector->plw, iColumn, iPos, iStartOffset, iEndOffset); +} + +static DLCollector *dlcNew(sqlite_int64 iDocid, DocListType iType){ + DLCollector *pCollector = malloc(sizeof(DLCollector)); + dataBufferInit(&pCollector->b, 0); + dlwInit(&pCollector->dlw, iType, &pCollector->b); + plwInit(&pCollector->plw, &pCollector->dlw, iDocid); + return pCollector; +} +static void dlcDelete(DLCollector *pCollector){ + plwDestroy(&pCollector->plw); + dlwDestroy(&pCollector->dlw); + dataBufferDestroy(&pCollector->b); + SCRAMBLE(pCollector); + free(pCollector); +} + + +/* Copy the doclist data of iType in pData/nData into *out, trimming +** unnecessary data as we go. Only columns matching iColumn are +** copied, all columns copied if iColumn is -1. Elements with no +** matching columns are dropped. The output is an iOutType doclist. +*/ +/* NOTE(shess) This code is only valid after all doclists are merged. +** If this is run before merges, then doclist items which represent +** deletion will be trimmed, and will thus not effect a deletion +** during the merge. +*/ +static void docListTrim(DocListType iType, const char *pData, int nData, + int iColumn, DocListType iOutType, DataBuffer *out){ + DLReader dlReader; + DLWriter dlWriter; + + assert( iOutType<=iType ); + + dlrInit(&dlReader, iType, pData, nData); + dlwInit(&dlWriter, iOutType, out); + + while( !dlrAtEnd(&dlReader) ){ + PLReader plReader; + PLWriter plWriter; + int match = 0; + + plrInit(&plReader, &dlReader); + + while( !plrAtEnd(&plReader) ){ + if( iColumn==-1 || plrColumn(&plReader)==iColumn ){ + if( !match ){ + plwInit(&plWriter, &dlWriter, dlrDocid(&dlReader)); + match = 1; + } + plwAdd(&plWriter, plrColumn(&plReader), plrPosition(&plReader), + plrStartOffset(&plReader), plrEndOffset(&plReader)); + } + plrStep(&plReader); + } + if( match ){ + plwTerminate(&plWriter); + plwDestroy(&plWriter); + } + + plrDestroy(&plReader); + dlrStep(&dlReader); + } + dlwDestroy(&dlWriter); + dlrDestroy(&dlReader); +} + +/* Used by docListMerge() to keep doclists in the ascending order by +** docid, then ascending order by age (so the newest comes first). +*/ +typedef struct OrderedDLReader { + DLReader *pReader; + + /* TODO(shess) If we assume that docListMerge pReaders is ordered by + ** age (which we do), then we could use pReader comparisons to break + ** ties. + */ + int idx; +} OrderedDLReader; + +/* Order eof to end, then by docid asc, idx desc. */ +static int orderedDLReaderCmp(OrderedDLReader *r1, OrderedDLReader *r2){ + if( dlrAtEnd(r1->pReader) ){ + if( dlrAtEnd(r2->pReader) ) return 0; /* Both atEnd(). */ + return 1; /* Only r1 atEnd(). */ + } + if( dlrAtEnd(r2->pReader) ) return -1; /* Only r2 atEnd(). */ + + if( dlrDocid(r1->pReader)pReader) ) return -1; + if( dlrDocid(r1->pReader)>dlrDocid(r2->pReader) ) return 1; + + /* Descending on idx. */ + return r2->idx-r1->idx; +} + +/* Bubble p[0] to appropriate place in p[1..n-1]. 
Assumes that +** p[1..n-1] is already sorted. +*/ +/* TODO(shess) Is this frequent enough to warrant a binary search? +** Before implementing that, instrument the code to check. In most +** current usage, I expect that p[0] will be less than p[1] a very +** high proportion of the time. +*/ +static void orderedDLReaderReorder(OrderedDLReader *p, int n){ + while( n>1 && orderedDLReaderCmp(p, p+1)>0 ){ + OrderedDLReader tmp = p[0]; + p[0] = p[1]; + p[1] = tmp; + n--; + p++; + } +} + +/* Given an array of doclist readers, merge their doclist elements +** into out in sorted order (by docid), dropping elements from older +** readers when there is a duplicate docid. pReaders is assumed to be +** ordered by age, oldest first. +*/ +/* TODO(shess) nReaders must be <= MERGE_COUNT. This should probably +** be fixed. +*/ +static void docListMerge(DataBuffer *out, + DLReader *pReaders, int nReaders){ + OrderedDLReader readers[MERGE_COUNT]; + DLWriter writer; + int i, n; + const char *pStart = 0; + int nStart = 0; + sqlite_int64 iFirstDocid = 0, iLastDocid = 0; + + assert( nReaders>0 ); + if( nReaders==1 ){ + dataBufferAppend(out, dlrDocData(pReaders), dlrAllDataBytes(pReaders)); + return; + } + + assert( nReaders<=MERGE_COUNT ); + n = 0; + for(i=0; i0 ){ + orderedDLReaderReorder(readers+i, nReaders-i); + } + + dlwInit(&writer, pReaders[0].iType, out); + while( !dlrAtEnd(readers[0].pReader) ){ + sqlite_int64 iDocid = dlrDocid(readers[0].pReader); + + /* If this is a continuation of the current buffer to copy, extend + ** that buffer. memcpy() seems to be more efficient if it has a + ** lots of data to copy. + */ + if( dlrDocData(readers[0].pReader)==pStart+nStart ){ + nStart += dlrDocDataBytes(readers[0].pReader); + }else{ + if( pStart!=0 ){ + dlwAppend(&writer, pStart, nStart, iFirstDocid, iLastDocid); + } + pStart = dlrDocData(readers[0].pReader); + nStart = dlrDocDataBytes(readers[0].pReader); + iFirstDocid = iDocid; + } + iLastDocid = iDocid; + dlrStep(readers[0].pReader); + + /* Drop all of the older elements with the same docid. */ + for(i=1; i0 ){ + orderedDLReaderReorder(readers+i, nReaders-i); + } + } + + /* Copy over any remaining elements. */ + if( nStart>0 ) dlwAppend(&writer, pStart, nStart, iFirstDocid, iLastDocid); + dlwDestroy(&writer); +} + +/* Helper function for posListUnion(). Compares the current position +** between left and right, returning as standard C idiom of <0 if +** left0 if left>right, and 0 if left==right. "End" always +** compares greater. +*/ +static int posListCmp(PLReader *pLeft, PLReader *pRight){ + assert( pLeft->iType==pRight->iType ); + if( pLeft->iType==DL_DOCIDS ) return 0; + + if( plrAtEnd(pLeft) ) return plrAtEnd(pRight) ? 0 : 1; + if( plrAtEnd(pRight) ) return -1; + + if( plrColumn(pLeft)plrColumn(pRight) ) return 1; + + if( plrPosition(pLeft)plrPosition(pRight) ) return 1; + if( pLeft->iType==DL_POSITIONS ) return 0; + + if( plrStartOffset(pLeft)plrStartOffset(pRight) ) return 1; + + if( plrEndOffset(pLeft)plrEndOffset(pRight) ) return 1; + + return 0; +} + +/* Write the union of position lists in pLeft and pRight to pOut. +** "Union" in this case meaning "All unique position tuples". Should +** work with any doclist type, though both inputs and the output +** should be the same type. 
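+** For example, the union of the position lists {3, 7} and {3, 9} for
+** the same docid and column is {3, 7, 9}; each distinct tuple is
+** written once.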
+*/ +static void posListUnion(DLReader *pLeft, DLReader *pRight, DLWriter *pOut){ + PLReader left, right; + PLWriter writer; + + assert( dlrDocid(pLeft)==dlrDocid(pRight) ); + assert( pLeft->iType==pRight->iType ); + assert( pLeft->iType==pOut->iType ); + + plrInit(&left, pLeft); + plrInit(&right, pRight); + plwInit(&writer, pOut, dlrDocid(pLeft)); + + while( !plrAtEnd(&left) || !plrAtEnd(&right) ){ + int c = posListCmp(&left, &right); + if( c<0 ){ + plwCopy(&writer, &left); + plrStep(&left); + }else if( c>0 ){ + plwCopy(&writer, &right); + plrStep(&right); + }else{ + plwCopy(&writer, &left); + plrStep(&left); + plrStep(&right); + } + } + + plwTerminate(&writer); + plwDestroy(&writer); + plrDestroy(&left); + plrDestroy(&right); +} + +/* Write the union of doclists in pLeft and pRight to pOut. For +** docids in common between the inputs, the union of the position +** lists is written. Inputs and outputs are always type DL_DEFAULT. +*/ +static void docListUnion( + const char *pLeft, int nLeft, + const char *pRight, int nRight, + DataBuffer *pOut /* Write the combined doclist here */ +){ + DLReader left, right; + DLWriter writer; + + if( nLeft==0 ){ + dataBufferAppend(pOut, pRight, nRight); + return; + } + if( nRight==0 ){ + dataBufferAppend(pOut, pLeft, nLeft); + return; + } + + dlrInit(&left, DL_DEFAULT, pLeft, nLeft); + dlrInit(&right, DL_DEFAULT, pRight, nRight); + dlwInit(&writer, DL_DEFAULT, pOut); + + while( !dlrAtEnd(&left) || !dlrAtEnd(&right) ){ + if( dlrAtEnd(&right) ){ + dlwCopy(&writer, &left); + dlrStep(&left); + }else if( dlrAtEnd(&left) ){ + dlwCopy(&writer, &right); + dlrStep(&right); + }else if( dlrDocid(&left)dlrDocid(&right) ){ + dlwCopy(&writer, &right); + dlrStep(&right); + }else{ + posListUnion(&left, &right, &writer); + dlrStep(&left); + dlrStep(&right); + } + } + + dlrDestroy(&left); + dlrDestroy(&right); + dlwDestroy(&writer); +} + +/* pLeft and pRight are DLReaders positioned to the same docid. +** +** If there are no instances in pLeft or pRight where the position +** of pLeft is one less than the position of pRight, then this +** routine adds nothing to pOut. +** +** If there are one or more instances where positions from pLeft +** are exactly one less than positions from pRight, then add a new +** document record to pOut. If pOut wants to hold positions, then +** include the positions from pRight that are one more than a +** position in pLeft. In other words: pRight.iPos==pLeft.iPos+1. +*/ +static void posListPhraseMerge(DLReader *pLeft, DLReader *pRight, + DLWriter *pOut){ + PLReader left, right; + PLWriter writer; + int match = 0; + + assert( dlrDocid(pLeft)==dlrDocid(pRight) ); + assert( pOut->iType!=DL_POSITIONS_OFFSETS ); + + plrInit(&left, pLeft); + plrInit(&right, pRight); + + while( !plrAtEnd(&left) && !plrAtEnd(&right) ){ + if( plrColumn(&left)plrColumn(&right) ){ + plrStep(&right); + }else if( plrPosition(&left)+1plrPosition(&right) ){ + plrStep(&right); + }else{ + if( !match ){ + plwInit(&writer, pOut, dlrDocid(pLeft)); + match = 1; + } + plwAdd(&writer, plrColumn(&right), plrPosition(&right), 0, 0); + plrStep(&left); + plrStep(&right); + } + } + + if( match ){ + plwTerminate(&writer); + plwDestroy(&writer); + } + + plrDestroy(&left); + plrDestroy(&right); +} + +/* We have two doclists with positions: pLeft and pRight. +** Write the phrase intersection of these two doclists into pOut. +** +** A phrase intersection means that two documents only match +** if pLeft.iPos+1==pRight.iPos. +** +** iType controls the type of data written to pOut. 
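+** (For instance, if the left term occurs at positions {5, 12} and the
+** right term at positions {6, 20} in the same column of a document,
+** only the pair 5/6 satisfies pLeft.iPos+1==pRight.iPos, so the
+** document matches and position 6 is the one that gets written.)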
If iType is +** DL_POSITIONS, the positions are those from pRight. +*/ +static void docListPhraseMerge( + const char *pLeft, int nLeft, + const char *pRight, int nRight, + DocListType iType, + DataBuffer *pOut /* Write the combined doclist here */ +){ + DLReader left, right; + DLWriter writer; + + if( nLeft==0 || nRight==0 ) return; + + assert( iType!=DL_POSITIONS_OFFSETS ); + + dlrInit(&left, DL_POSITIONS, pLeft, nLeft); + dlrInit(&right, DL_POSITIONS, pRight, nRight); + dlwInit(&writer, iType, pOut); + + while( !dlrAtEnd(&left) && !dlrAtEnd(&right) ){ + if( dlrDocid(&left) one AND (two OR three) + * [one OR two three] ==> (one OR two) AND three + * + * A "-" before a term matches all entries that lack that term. + * The "-" must occur immediately before the term with in intervening + * space. This is how the search engines do it. + * + * A NOT term cannot be the right-hand operand of an OR. If this + * occurs in the query string, the NOT is ignored: + * + * [one OR -two] ==> one OR two + * + */ +typedef struct Query { + fulltext_vtab *pFts; /* The full text index */ + int nTerms; /* Number of terms in the query */ + QueryTerm *pTerms; /* Array of terms. Space obtained from malloc() */ + int nextIsOr; /* Set the isOr flag on the next inserted term */ + int nextColumn; /* Next word parsed must be in this column */ + int dfltColumn; /* The default column */ +} Query; + + +/* +** An instance of the following structure keeps track of generated +** matching-word offset information and snippets. +*/ +typedef struct Snippet { + int nMatch; /* Total number of matches */ + int nAlloc; /* Space allocated for aMatch[] */ + struct snippetMatch { /* One entry for each matching term */ + char snStatus; /* Status flag for use while constructing snippets */ + short int iCol; /* The column that contains the match */ + short int iTerm; /* The index in Query.pTerms[] of the matching term */ + short int nByte; /* Number of bytes in the term */ + int iStart; /* The offset to the first character of the term */ + } *aMatch; /* Points to space obtained from malloc */ + char *zOffset; /* Text rendering of aMatch[] */ + int nOffset; /* strlen(zOffset) */ + char *zSnippet; /* Snippet text */ + int nSnippet; /* strlen(zSnippet) */ +} Snippet; + + +typedef enum QueryType { + QUERY_GENERIC, /* table scan */ + QUERY_DOCID, /* lookup by docid */ + QUERY_FULLTEXT /* QUERY_FULLTEXT + [i] is a full-text search for column i*/ +} QueryType; + +typedef enum fulltext_statement { + CONTENT_INSERT_STMT, + CONTENT_SELECT_STMT, + CONTENT_UPDATE_STMT, + CONTENT_DELETE_STMT, + + BLOCK_INSERT_STMT, + BLOCK_SELECT_STMT, + BLOCK_DELETE_STMT, + + SEGDIR_MAX_INDEX_STMT, + SEGDIR_SET_STMT, + SEGDIR_SELECT_STMT, + SEGDIR_SPAN_STMT, + SEGDIR_DELETE_STMT, + SEGDIR_SELECT_ALL_STMT, + + MAX_STMT /* Always at end! */ +} fulltext_statement; + +/* These must exactly match the enum above. */ +/* TODO(shess): Is there some risk that a statement will be used in two +** cursors at once, e.g. if a query joins a virtual table to itself? +** If so perhaps we should move some of these to the cursor object. 
+*/ +static const char *const fulltext_zStatement[MAX_STMT] = { + /* CONTENT_INSERT */ NULL, /* generated in contentInsertStatement() */ + /* CONTENT_SELECT */ NULL, /* generated in contentSelectStatement() */ + /* CONTENT_UPDATE */ NULL, /* generated in contentUpdateStatement() */ + /* CONTENT_DELETE */ "delete from %_content where docid = ?", + + /* BLOCK_INSERT */ + "insert into %_segments (blockid, block) values (null, ?)", + /* BLOCK_SELECT */ "select block from %_segments where blockid = ?", + /* BLOCK_DELETE */ "delete from %_segments where blockid between ? and ?", + + /* SEGDIR_MAX_INDEX */ "select max(idx) from %_segdir where level = ?", + /* SEGDIR_SET */ "insert into %_segdir values (?, ?, ?, ?, ?, ?)", + /* SEGDIR_SELECT */ + "select start_block, leaves_end_block, root from %_segdir " + " where level = ? order by idx", + /* SEGDIR_SPAN */ + "select min(start_block), max(end_block) from %_segdir " + " where level = ? and start_block <> 0", + /* SEGDIR_DELETE */ "delete from %_segdir where level = ?", + /* SEGDIR_SELECT_ALL */ + "select root, leaves_end_block from %_segdir order by level desc, idx", +}; + +/* +** A connection to a fulltext index is an instance of the following +** structure. The xCreate and xConnect methods create an instance +** of this structure and xDestroy and xDisconnect free that instance. +** All other methods receive a pointer to the structure as one of their +** arguments. +*/ +struct fulltext_vtab { + sqlite3_vtab base; /* Base class used by SQLite core */ + sqlite3 *db; /* The database connection */ + const char *zDb; /* logical database name */ + const char *zName; /* virtual table name */ + int nColumn; /* number of columns in virtual table */ + char **azColumn; /* column names. malloced */ + char **azContentColumn; /* column names in content table; malloced */ + sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */ + + /* Precompiled statements which we keep as long as the table is + ** open. + */ + sqlite3_stmt *pFulltextStatements[MAX_STMT]; + + /* Precompiled statements used for segment merges. We run a + ** separate select across the leaf level of each tree being merged. + */ + sqlite3_stmt *pLeafSelectStmts[MERGE_COUNT]; + /* The statement used to prepare pLeafSelectStmts. */ +#define LEAF_SELECT \ + "select block from %_segments where blockid between ? and ? order by blockid" + + /* These buffer pending index updates during transactions. + ** nPendingData estimates the memory size of the pending data. It + ** doesn't include the hash-bucket overhead, nor any malloc + ** overhead. When nPendingData exceeds kPendingThreshold, the + ** buffer is flushed even before the transaction closes. + ** pendingTerms stores the data, and is only valid when nPendingData + ** is >=0 (nPendingData<0 means pendingTerms has not been + ** initialized). iPrevDocid is the last docid written, used to make + ** certain we're inserting in sorted order. + */ + int nPendingData; +#define kPendingThreshold (1*1024*1024) + sqlite_int64 iPrevDocid; + fts3Hash pendingTerms; +}; + +/* +** When the core wants to do a query, it create a cursor using a +** call to xOpen. This structure is an instance of a cursor. It +** is destroyed by xClose. 
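+**
+** (In virtual-table terms: the core calls xOpen to create the cursor,
+** xFilter to start a scan using the strategy picked by xBestIndex
+** (the idxNum value copied into iCursorType below), and then xEof,
+** xColumn, xRowid and xNext to walk the results before xClose.)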
+*/ +typedef struct fulltext_cursor { + sqlite3_vtab_cursor base; /* Base class used by SQLite core */ + QueryType iCursorType; /* Copy of sqlite3_index_info.idxNum */ + sqlite3_stmt *pStmt; /* Prepared statement in use by the cursor */ + int eof; /* True if at End Of Results */ + Query q; /* Parsed query string */ + Snippet snippet; /* Cached snippet for the current row */ + int iColumn; /* Column being searched */ + DataBuffer result; /* Doclist results from fulltextQuery */ + DLReader reader; /* Result reader if result not empty */ +} fulltext_cursor; + +static struct fulltext_vtab *cursor_vtab(fulltext_cursor *c){ + return (fulltext_vtab *) c->base.pVtab; +} + +static const sqlite3_module fts3Module; /* forward declaration */ + +/* Return a dynamically generated statement of the form + * insert into %_content (docid, ...) values (?, ...) + */ +static const char *contentInsertStatement(fulltext_vtab *v){ + StringBuffer sb; + int i; + + initStringBuffer(&sb); + append(&sb, "insert into %_content (docid, "); + appendList(&sb, v->nColumn, v->azContentColumn); + append(&sb, ") values (?"); + for(i=0; inColumn; ++i) + append(&sb, ", ?"); + append(&sb, ")"); + return stringBufferData(&sb); +} + +/* Return a dynamically generated statement of the form + * select from %_content where docid = ? + */ +static const char *contentSelectStatement(fulltext_vtab *v){ + StringBuffer sb; + initStringBuffer(&sb); + append(&sb, "SELECT "); + appendList(&sb, v->nColumn, v->azContentColumn); + append(&sb, " FROM %_content WHERE docid = ?"); + return stringBufferData(&sb); +} + +/* Return a dynamically generated statement of the form + * update %_content set [col_0] = ?, [col_1] = ?, ... + * where docid = ? + */ +static const char *contentUpdateStatement(fulltext_vtab *v){ + StringBuffer sb; + int i; + + initStringBuffer(&sb); + append(&sb, "update %_content set "); + for(i=0; inColumn; ++i) { + if( i>0 ){ + append(&sb, ", "); + } + append(&sb, v->azContentColumn[i]); + append(&sb, " = ?"); + } + append(&sb, " where docid = ?"); + return stringBufferData(&sb); +} + +/* Puts a freshly-prepared statement determined by iStmt in *ppStmt. +** If the indicated statement has never been prepared, it is prepared +** and cached, otherwise the cached version is reset. +*/ +static int sql_get_statement(fulltext_vtab *v, fulltext_statement iStmt, + sqlite3_stmt **ppStmt){ + assert( iStmtpFulltextStatements[iStmt]==NULL ){ + const char *zStmt; + int rc; + switch( iStmt ){ + case CONTENT_INSERT_STMT: + zStmt = contentInsertStatement(v); break; + case CONTENT_SELECT_STMT: + zStmt = contentSelectStatement(v); break; + case CONTENT_UPDATE_STMT: + zStmt = contentUpdateStatement(v); break; + default: + zStmt = fulltext_zStatement[iStmt]; + } + rc = sql_prepare(v->db, v->zDb, v->zName, &v->pFulltextStatements[iStmt], + zStmt); + if( zStmt != fulltext_zStatement[iStmt]) free((void *) zStmt); + if( rc!=SQLITE_OK ) return rc; + } else { + int rc = sqlite3_reset(v->pFulltextStatements[iStmt]); + if( rc!=SQLITE_OK ) return rc; + } + + *ppStmt = v->pFulltextStatements[iStmt]; + return SQLITE_OK; +} + +/* Like sqlite3_step(), but convert SQLITE_DONE to SQLITE_OK and +** SQLITE_ROW to SQLITE_ERROR. Useful for statements like UPDATE, +** where we expect no results. +*/ +static int sql_single_step(sqlite3_stmt *s){ + int rc = sqlite3_step(s); + return (rc==SQLITE_DONE) ? SQLITE_OK : rc; +} + +/* Like sql_get_statement(), but for special replicated LEAF_SELECT +** statements. 
+*/ +/* TODO(shess) Write version for generic statements and then share +** that between the cached-statement functions. +*/ +static int sql_get_leaf_statement(fulltext_vtab *v, int idx, + sqlite3_stmt **ppStmt){ + assert( idx>=0 && idxpLeafSelectStmts[idx]==NULL ){ + int rc = sql_prepare(v->db, v->zDb, v->zName, &v->pLeafSelectStmts[idx], + LEAF_SELECT); + if( rc!=SQLITE_OK ) return rc; + }else{ + int rc = sqlite3_reset(v->pLeafSelectStmts[idx]); + if( rc!=SQLITE_OK ) return rc; + } + + *ppStmt = v->pLeafSelectStmts[idx]; + return SQLITE_OK; +} + +/* insert into %_content (docid, ...) values ([docid], [pValues]) +** If the docid contains SQL NULL, then a unique docid will be +** generated. +*/ +static int content_insert(fulltext_vtab *v, sqlite3_value *docid, + sqlite3_value **pValues){ + sqlite3_stmt *s; + int i; + int rc = sql_get_statement(v, CONTENT_INSERT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_value(s, 1, docid); + if( rc!=SQLITE_OK ) return rc; + + for(i=0; inColumn; ++i){ + rc = sqlite3_bind_value(s, 2+i, pValues[i]); + if( rc!=SQLITE_OK ) return rc; + } + + return sql_single_step(s); +} + +/* update %_content set col0 = pValues[0], col1 = pValues[1], ... + * where docid = [iDocid] */ +static int content_update(fulltext_vtab *v, sqlite3_value **pValues, + sqlite_int64 iDocid){ + sqlite3_stmt *s; + int i; + int rc = sql_get_statement(v, CONTENT_UPDATE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + for(i=0; inColumn; ++i){ + rc = sqlite3_bind_value(s, 1+i, pValues[i]); + if( rc!=SQLITE_OK ) return rc; + } + + rc = sqlite3_bind_int64(s, 1+v->nColumn, iDocid); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +static void freeStringArray(int nString, const char **pString){ + int i; + + for (i=0 ; i < nString ; ++i) { + if( pString[i]!=NULL ) free((void *) pString[i]); + } + free((void *) pString); +} + +/* select * from %_content where docid = [iDocid] + * The caller must delete the returned array and all strings in it. + * null fields will be NULL in the returned array. + * + * TODO: Perhaps we should return pointer/length strings here for consistency + * with other code which uses pointer/length. */ +static int content_select(fulltext_vtab *v, sqlite_int64 iDocid, + const char ***pValues){ + sqlite3_stmt *s; + const char **values; + int i; + int rc; + + *pValues = NULL; + + rc = sql_get_statement(v, CONTENT_SELECT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iDocid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc!=SQLITE_ROW ) return rc; + + values = (const char **) malloc(v->nColumn * sizeof(const char *)); + for(i=0; inColumn; ++i){ + if( sqlite3_column_type(s, i)==SQLITE_NULL ){ + values[i] = NULL; + }else{ + values[i] = string_dup((char*)sqlite3_column_text(s, i)); + } + } + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. 
*/ + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ){ + *pValues = values; + return SQLITE_OK; + } + + freeStringArray(v->nColumn, values); + return rc; +} + +/* delete from %_content where docid = [iDocid ] */ +static int content_delete(fulltext_vtab *v, sqlite_int64 iDocid){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, CONTENT_DELETE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iDocid); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* insert into %_segments values ([pData]) +** returns assigned blockid in *piBlockid +*/ +static int block_insert(fulltext_vtab *v, const char *pData, int nData, + sqlite_int64 *piBlockid){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, BLOCK_INSERT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_blob(s, 1, pData, nData, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + if( rc!=SQLITE_DONE ) return rc; + + /* blockid column is an alias for rowid. */ + *piBlockid = sqlite3_last_insert_rowid(v->db); + return SQLITE_OK; +} + +/* delete from %_segments +** where blockid between [iStartBlockid] and [iEndBlockid] +** +** Deletes the range of blocks, inclusive, used to delete the blocks +** which form a segment. +*/ +static int block_delete(fulltext_vtab *v, + sqlite_int64 iStartBlockid, sqlite_int64 iEndBlockid){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, BLOCK_DELETE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iStartBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 2, iEndBlockid); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* Returns SQLITE_ROW with *pidx set to the maximum segment idx found +** at iLevel. Returns SQLITE_DONE if there are no segments at +** iLevel. Otherwise returns an error. +*/ +static int segdir_max_index(fulltext_vtab *v, int iLevel, int *pidx){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_MAX_INDEX_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + /* Should always get at least one row due to how max() works. */ + if( rc==SQLITE_DONE ) return SQLITE_DONE; + if( rc!=SQLITE_ROW ) return rc; + + /* NULL means that there were no inputs to max(). */ + if( SQLITE_NULL==sqlite3_column_type(s, 0) ){ + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + return rc; + } + + *pidx = sqlite3_column_int(s, 0); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. 
*/ + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + if( rc!=SQLITE_DONE ) return rc; + return SQLITE_ROW; +} + +/* insert into %_segdir values ( +** [iLevel], [idx], +** [iStartBlockid], [iLeavesEndBlockid], [iEndBlockid], +** [pRootData] +** ) +*/ +static int segdir_set(fulltext_vtab *v, int iLevel, int idx, + sqlite_int64 iStartBlockid, + sqlite_int64 iLeavesEndBlockid, + sqlite_int64 iEndBlockid, + const char *pRootData, int nRootData){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_SET_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 2, idx); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 3, iStartBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 4, iLeavesEndBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 5, iEndBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_blob(s, 6, pRootData, nRootData, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* Queries %_segdir for the block span of the segments in level +** iLevel. Returns SQLITE_DONE if there are no blocks for iLevel, +** SQLITE_ROW if there are blocks, else an error. +*/ +static int segdir_span(fulltext_vtab *v, int iLevel, + sqlite_int64 *piStartBlockid, + sqlite_int64 *piEndBlockid){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_SPAN_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ) return SQLITE_DONE; /* Should never happen */ + if( rc!=SQLITE_ROW ) return rc; + + /* This happens if all segments at this level are entirely inline. */ + if( SQLITE_NULL==sqlite3_column_type(s, 0) ){ + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + int rc2 = sqlite3_step(s); + if( rc2==SQLITE_ROW ) return SQLITE_ERROR; + return rc2; + } + + *piStartBlockid = sqlite3_column_int64(s, 0); + *piEndBlockid = sqlite3_column_int64(s, 1); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + if( rc!=SQLITE_DONE ) return rc; + return SQLITE_ROW; +} + +/* Delete the segment blocks and segment directory records for all +** segments at iLevel. +*/ +static int segdir_delete(fulltext_vtab *v, int iLevel){ + sqlite3_stmt *s; + sqlite_int64 iStartBlockid, iEndBlockid; + int rc = segdir_span(v, iLevel, &iStartBlockid, &iEndBlockid); + if( rc!=SQLITE_ROW && rc!=SQLITE_DONE ) return rc; + + if( rc==SQLITE_ROW ){ + rc = block_delete(v, iStartBlockid, iEndBlockid); + if( rc!=SQLITE_OK ) return rc; + } + + /* Delete the segment directory itself. */ + rc = sql_get_statement(v, SEGDIR_DELETE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* TODO(shess) clearPendingTerms() is far down the file because +** writeZeroSegment() is far down the file because LeafWriter is far +** down the file. Consider refactoring the code to move the non-vtab +** code above the vtab code so that we don't need this forward +** reference. +*/ +static int clearPendingTerms(fulltext_vtab *v); + +/* +** Free the memory used to contain a fulltext_vtab structure. 
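+** All resources hanging off the structure are released as well: the
+** cached statements in pFulltextStatements[] and pLeafSelectStmts[],
+** the tokenizer, any pending index updates, and the malloced column
+** name arrays.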
+*/ +static void fulltext_vtab_destroy(fulltext_vtab *v){ + int iStmt, i; + + TRACE(("FTS3 Destroy %p\n", v)); + for( iStmt=0; iStmtpFulltextStatements[iStmt]!=NULL ){ + sqlite3_finalize(v->pFulltextStatements[iStmt]); + v->pFulltextStatements[iStmt] = NULL; + } + } + + for( i=0; ipLeafSelectStmts[i]!=NULL ){ + sqlite3_finalize(v->pLeafSelectStmts[i]); + v->pLeafSelectStmts[i] = NULL; + } + } + + if( v->pTokenizer!=NULL ){ + v->pTokenizer->pModule->xDestroy(v->pTokenizer); + v->pTokenizer = NULL; + } + + clearPendingTerms(v); + + free(v->azColumn); + for(i = 0; i < v->nColumn; ++i) { + sqlite3_free(v->azContentColumn[i]); + } + free(v->azContentColumn); + free(v); +} + +/* +** Token types for parsing the arguments to xConnect or xCreate. +*/ +#define TOKEN_EOF 0 /* End of file */ +#define TOKEN_SPACE 1 /* Any kind of whitespace */ +#define TOKEN_ID 2 /* An identifier */ +#define TOKEN_STRING 3 /* A string literal */ +#define TOKEN_PUNCT 4 /* A single punctuation character */ + +/* +** If X is a character that can be used in an identifier then +** IdChar(X) will be true. Otherwise it is false. +** +** For ASCII, any character with the high-order bit set is +** allowed in an identifier. For 7-bit characters, +** sqlite3IsIdChar[X] must be 1. +** +** Ticket #1066. the SQL standard does not allow '$' in the +** middle of identfiers. But many SQL implementations do. +** SQLite will allow '$' in identifiers for compatibility. +** But the feature is undocumented. +*/ +static const char isIdChar[] = { +/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ + 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ +}; +#define IdChar(C) (((c=C)&0x80)!=0 || (c>0x1f && isIdChar[c-0x20])) + + +/* +** Return the length of the token that begins at z[0]. +** Store the token type in *tokenType before returning. +*/ +static int getToken(const char *z, int *tokenType){ + int i, c; + switch( *z ){ + case 0: { + *tokenType = TOKEN_EOF; + return 0; + } + case ' ': case '\t': case '\n': case '\f': case '\r': { + for(i=1; safe_isspace(z[i]); i++){} + *tokenType = TOKEN_SPACE; + return i; + } + case '`': + case '\'': + case '"': { + int delim = z[0]; + for(i=1; (c=z[i])!=0; i++){ + if( c==delim ){ + if( z[i+1]==delim ){ + i++; + }else{ + break; + } + } + } + *tokenType = TOKEN_STRING; + return i + (c!=0); + } + case '[': { + for(i=1, c=z[0]; c!=']' && (c=z[i])!=0; i++){} + *tokenType = TOKEN_ID; + return i; + } + default: { + if( !IdChar(*z) ){ + break; + } + for(i=1; IdChar(z[i]); i++){} + *tokenType = TOKEN_ID; + return i; + } + } + *tokenType = TOKEN_PUNCT; + return 1; +} + +/* +** A token extracted from a string is an instance of the following +** structure. +*/ +typedef struct Token { + const char *z; /* Pointer to token text. Not '\000' terminated */ + short int n; /* Length of the token text in bytes. */ +} Token; + +/* +** Given a input string (which is really one of the argv[] parameters +** passed into xConnect or xCreate) split the string up into tokens. +** Return an array of pointers to '\000' terminated strings, one string +** for each non-whitespace token. +** +** The returned array is terminated by a single NULL pointer. 
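+**
+** For example, tokenizeString("tokenize mytokenizer(myarg)", &n)
+** should return {"tokenize", "mytokenizer", "(", "myarg", ")", NULL}
+** with n set to 5: whitespace is discarded, while punctuation and
+** quoted strings are kept as separate tokens.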
+** +** Space to hold the returned array is obtained from a single +** malloc and should be freed by passing the return value to free(). +** The individual strings within the token list are all a part of +** the single memory allocation and will all be freed at once. +*/ +static char **tokenizeString(const char *z, int *pnToken){ + int nToken = 0; + Token *aToken = malloc( strlen(z) * sizeof(aToken[0]) ); + int n = 1; + int e, i; + int totalSize = 0; + char **azToken; + char *zCopy; + while( n>0 ){ + n = getToken(z, &e); + if( e!=TOKEN_SPACE ){ + aToken[nToken].z = z; + aToken[nToken].n = n; + nToken++; + totalSize += n+1; + } + z += n; + } + azToken = (char**)malloc( nToken*sizeof(char*) + totalSize ); + zCopy = (char*)&azToken[nToken]; + nToken--; + for(i=0; i=0 ){ + azIn[j] = azIn[i]; + } + j++; + } + } + azIn[j] = 0; + } +} + + +/* +** Find the first alphanumeric token in the string zIn. Null-terminate +** this token. Remove any quotation marks. And return a pointer to +** the result. +*/ +static char *firstToken(char *zIn, char **pzTail){ + int n, ttype; + while(1){ + n = getToken(zIn, &ttype); + if( ttype==TOKEN_SPACE ){ + zIn += n; + }else if( ttype==TOKEN_EOF ){ + *pzTail = zIn; + return 0; + }else{ + zIn[n] = 0; + *pzTail = &zIn[1]; + dequoteString(zIn); + return zIn; + } + } + /*NOTREACHED*/ +} + +/* Return true if... +** +** * s begins with the string t, ignoring case +** * s is longer than t +** * The first character of s beyond t is not a alphanumeric +** +** Ignore leading space in *s. +** +** To put it another way, return true if the first token of +** s[] is t[]. +*/ +static int startsWith(const char *s, const char *t){ + while( safe_isspace(*s) ){ s++; } + while( *t ){ + if( safe_tolower(*s++)!=safe_tolower(*t++) ) return 0; + } + return *s!='_' && !safe_isalnum(*s); +} + +/* +** An instance of this structure defines the "spec" of a +** full text index. This structure is populated by parseSpec +** and use by fulltextConnect and fulltextCreate. +*/ +typedef struct TableSpec { + const char *zDb; /* Logical database name */ + const char *zName; /* Name of the full-text index */ + int nColumn; /* Number of columns to be indexed */ + char **azColumn; /* Original names of columns to be indexed */ + char **azContentColumn; /* Column names for %_content */ + char **azTokenizer; /* Name of tokenizer and its arguments */ +} TableSpec; + +/* +** Reclaim all of the memory used by a TableSpec +*/ +static void clearTableSpec(TableSpec *p) { + free(p->azColumn); + free(p->azContentColumn); + free(p->azTokenizer); +} + +/* Parse a CREATE VIRTUAL TABLE statement, which looks like this: + * + * CREATE VIRTUAL TABLE email + * USING fts3(subject, body, tokenize mytokenizer(myarg)) + * + * We return parsed information in a TableSpec structure. + * + */ +static int parseSpec(TableSpec *pSpec, int argc, const char *const*argv, + char**pzErr){ + int i, n; + char *z, *zDummy; + char **azArg; + const char *zTokenizer = 0; /* argv[] entry describing the tokenizer */ + + assert( argc>=3 ); + /* Current interface: + ** argv[0] - module name + ** argv[1] - database name + ** argv[2] - table name + ** argv[3..] - columns, optionally followed by tokenizer specification + ** and snippet delimiters specification. + */ + + /* Make a copy of the complete argv[][] array in a single allocation. + ** The argv[][] array is read-only and transient. We can write to the + ** copy in order to modify things and the copy is persistent. 
+ */ + CLEAR(pSpec); + for(i=n=0; izDb = azArg[1]; + pSpec->zName = azArg[2]; + pSpec->nColumn = 0; + pSpec->azColumn = azArg; + zTokenizer = "tokenize simple"; + for(i=3; inColumn] = firstToken(azArg[i], &zDummy); + pSpec->nColumn++; + } + } + if( pSpec->nColumn==0 ){ + azArg[0] = "content"; + pSpec->nColumn = 1; + } + + /* + ** Construct the list of content column names. + ** + ** Each content column name will be of the form cNNAAAA + ** where NN is the column number and AAAA is the sanitized + ** column name. "sanitized" means that special characters are + ** converted to "_". The cNN prefix guarantees that all column + ** names are unique. + ** + ** The AAAA suffix is not strictly necessary. It is included + ** for the convenience of people who might examine the generated + ** %_content table and wonder what the columns are used for. + */ + pSpec->azContentColumn = malloc( pSpec->nColumn * sizeof(char *) ); + if( pSpec->azContentColumn==0 ){ + clearTableSpec(pSpec); + return SQLITE_NOMEM; + } + for(i=0; inColumn; i++){ + char *p; + pSpec->azContentColumn[i] = sqlite3_mprintf("c%d%s", i, azArg[i]); + for (p = pSpec->azContentColumn[i]; *p ; ++p) { + if( !safe_isalnum(*p) ) *p = '_'; + } + } + + /* + ** Parse the tokenizer specification string. + */ + pSpec->azTokenizer = tokenizeString(zTokenizer, &n); + tokenListToIdList(pSpec->azTokenizer); + + return SQLITE_OK; +} + +/* +** Generate a CREATE TABLE statement that describes the schema of +** the virtual table. Return a pointer to this schema string. +** +** Space is obtained from sqlite3_mprintf() and should be freed +** using sqlite3_free(). +*/ +static char *fulltextSchema( + int nColumn, /* Number of columns */ + const char *const* azColumn, /* List of columns */ + const char *zTableName /* Name of the table */ +){ + int i; + char *zSchema, *zNext; + const char *zSep = "("; + zSchema = sqlite3_mprintf("CREATE TABLE x"); + for(i=0; ibase */ + v->db = db; + v->zDb = spec->zDb; /* Freed when azColumn is freed */ + v->zName = spec->zName; /* Freed when azColumn is freed */ + v->nColumn = spec->nColumn; + v->azContentColumn = spec->azContentColumn; + spec->azContentColumn = 0; + v->azColumn = spec->azColumn; + spec->azColumn = 0; + + if( spec->azTokenizer==0 ){ + return SQLITE_NOMEM; + } + + zTok = spec->azTokenizer[0]; + if( !zTok ){ + zTok = "simple"; + } + nTok = strlen(zTok)+1; + + m = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash, zTok, nTok); + if( !m ){ + *pzErr = sqlite3_mprintf("unknown tokenizer: %s", spec->azTokenizer[0]); + rc = SQLITE_ERROR; + goto err; + } + + for(n=0; spec->azTokenizer[n]; n++){} + if( n ){ + rc = m->xCreate(n-1, (const char*const*)&spec->azTokenizer[1], + &v->pTokenizer); + }else{ + rc = m->xCreate(0, 0, &v->pTokenizer); + } + if( rc!=SQLITE_OK ) goto err; + v->pTokenizer->pModule = m; + + /* TODO: verify the existence of backing tables foo_content, foo_term */ + + schema = fulltextSchema(v->nColumn, (const char*const*)v->azColumn, + spec->zName); + rc = sqlite3_declare_vtab(db, schema); + sqlite3_free(schema); + if( rc!=SQLITE_OK ) goto err; + + memset(v->pFulltextStatements, 0, sizeof(v->pFulltextStatements)); + + /* Indicate that the buffer is not live. 
*/ + v->nPendingData = -1; + + *ppVTab = &v->base; + TRACE(("FTS3 Connect %p\n", v)); + + return rc; + +err: + fulltext_vtab_destroy(v); + return rc; +} + +static int fulltextConnect( + sqlite3 *db, + void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVTab, + char **pzErr +){ + TableSpec spec; + int rc = parseSpec(&spec, argc, argv, pzErr); + if( rc!=SQLITE_OK ) return rc; + + rc = constructVtab(db, (fts3Hash *)pAux, &spec, ppVTab, pzErr); + clearTableSpec(&spec); + return rc; +} + +/* The %_content table holds the text of each document, with +** the docid column exposed as the SQLite rowid for the table. +*/ +/* TODO(shess) This comment needs elaboration to match the updated +** code. Work it into the top-of-file comment at that time. +*/ +static int fulltextCreate(sqlite3 *db, void *pAux, + int argc, const char * const *argv, + sqlite3_vtab **ppVTab, char **pzErr){ + int rc; + TableSpec spec; + StringBuffer schema; + TRACE(("FTS3 Create\n")); + + rc = parseSpec(&spec, argc, argv, pzErr); + if( rc!=SQLITE_OK ) return rc; + + initStringBuffer(&schema); + append(&schema, "CREATE TABLE %_content("); + append(&schema, " docid INTEGER PRIMARY KEY,"); + appendList(&schema, spec.nColumn, spec.azContentColumn); + append(&schema, ")"); + rc = sql_exec(db, spec.zDb, spec.zName, stringBufferData(&schema)); + stringBufferDestroy(&schema); + if( rc!=SQLITE_OK ) goto out; + + rc = sql_exec(db, spec.zDb, spec.zName, + "create table %_segments(" + " blockid INTEGER PRIMARY KEY," + " block blob" + ");" + ); + if( rc!=SQLITE_OK ) goto out; + + rc = sql_exec(db, spec.zDb, spec.zName, + "create table %_segdir(" + " level integer," + " idx integer," + " start_block integer," + " leaves_end_block integer," + " end_block integer," + " root blob," + " primary key(level, idx)" + ");"); + if( rc!=SQLITE_OK ) goto out; + + rc = constructVtab(db, (fts3Hash *)pAux, &spec, ppVTab, pzErr); + +out: + clearTableSpec(&spec); + return rc; +} + +/* Decide how to handle an SQL query. */ +static int fulltextBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ + fulltext_vtab *v = (fulltext_vtab *)pVTab; + int i; + TRACE(("FTS3 BestIndex\n")); + + for(i=0; inConstraint; ++i){ + const struct sqlite3_index_constraint *pConstraint; + pConstraint = &pInfo->aConstraint[i]; + if( pConstraint->usable ) { + if( (pConstraint->iColumn==-1 || pConstraint->iColumn==v->nColumn+1) && + pConstraint->op==SQLITE_INDEX_CONSTRAINT_EQ ){ + pInfo->idxNum = QUERY_DOCID; /* lookup by docid */ + TRACE(("FTS3 QUERY_DOCID\n")); + } else if( pConstraint->iColumn>=0 && pConstraint->iColumn<=v->nColumn && + pConstraint->op==SQLITE_INDEX_CONSTRAINT_MATCH ){ + /* full-text search */ + pInfo->idxNum = QUERY_FULLTEXT + pConstraint->iColumn; + TRACE(("FTS3 QUERY_FULLTEXT %d\n", pConstraint->iColumn)); + } else continue; + + pInfo->aConstraintUsage[i].argvIndex = 1; + pInfo->aConstraintUsage[i].omit = 1; + + /* An arbitrary value for now. + * TODO: Perhaps docid matches should be considered cheaper than + * full-text searches. 
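+       * One purely illustrative sketch of that (not what the code below
+       * does) might be:
+       *
+       *   pInfo->estimatedCost =
+       *       (pInfo->idxNum==QUERY_DOCID) ? 1.0 : 1000.0;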
*/ + pInfo->estimatedCost = 1.0; + + return SQLITE_OK; + } + } + pInfo->idxNum = QUERY_GENERIC; + return SQLITE_OK; +} + +static int fulltextDisconnect(sqlite3_vtab *pVTab){ + TRACE(("FTS3 Disconnect %p\n", pVTab)); + fulltext_vtab_destroy((fulltext_vtab *)pVTab); + return SQLITE_OK; +} + +static int fulltextDestroy(sqlite3_vtab *pVTab){ + fulltext_vtab *v = (fulltext_vtab *)pVTab; + int rc; + + TRACE(("FTS3 Destroy %p\n", pVTab)); + rc = sql_exec(v->db, v->zDb, v->zName, + "drop table if exists %_content;" + "drop table if exists %_segments;" + "drop table if exists %_segdir;" + ); + if( rc!=SQLITE_OK ) return rc; + + fulltext_vtab_destroy((fulltext_vtab *)pVTab); + return SQLITE_OK; +} + +static int fulltextOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ + fulltext_cursor *c; + + c = (fulltext_cursor *) calloc(sizeof(fulltext_cursor), 1); + /* sqlite will initialize c->base */ + *ppCursor = &c->base; + TRACE(("FTS3 Open %p: %p\n", pVTab, c)); + + return SQLITE_OK; +} + + +/* Free all of the dynamically allocated memory held by *q +*/ +static void queryClear(Query *q){ + int i; + for(i = 0; i < q->nTerms; ++i){ + free(q->pTerms[i].pTerm); + } + free(q->pTerms); + CLEAR(q); +} + +/* Free all of the dynamically allocated memory held by the +** Snippet +*/ +static void snippetClear(Snippet *p){ + free(p->aMatch); + free(p->zOffset); + free(p->zSnippet); + CLEAR(p); +} +/* +** Append a single entry to the p->aMatch[] log. +*/ +static void snippetAppendMatch( + Snippet *p, /* Append the entry to this snippet */ + int iCol, int iTerm, /* The column and query term */ + int iStart, int nByte /* Offset and size of the match */ +){ + int i; + struct snippetMatch *pMatch; + if( p->nMatch+1>=p->nAlloc ){ + p->nAlloc = p->nAlloc*2 + 10; + p->aMatch = realloc(p->aMatch, p->nAlloc*sizeof(p->aMatch[0]) ); + if( p->aMatch==0 ){ + p->nMatch = 0; + p->nAlloc = 0; + return; + } + } + i = p->nMatch++; + pMatch = &p->aMatch[i]; + pMatch->iCol = iCol; + pMatch->iTerm = iTerm; + pMatch->iStart = iStart; + pMatch->nByte = nByte; +} + +/* +** Sizing information for the circular buffer used in snippetOffsetsOfColumn() +*/ +#define FTS3_ROTOR_SZ (32) +#define FTS3_ROTOR_MASK (FTS3_ROTOR_SZ-1) + +/* +** Add entries to pSnippet->aMatch[] for every match that occurs against +** document zDoc[0..nDoc-1] which is stored in column iColumn. 
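+**
+** Phrase queries are handled with the match/prevMatch bitmasks below: bit i
+** of "match" is set when query term i matches the current token, and for a
+** term that is not the first word of its phrase (iPhrase>1) the bit is only
+** set if the previous token matched term i-1, which is what
+** prevMatch = match<<1 tracks from one token to the next.  For the phrase
+** "full text", for example, the token "text" is only recorded as a match
+** when the immediately preceding token was "full".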
+*/ +static void snippetOffsetsOfColumn( + Query *pQuery, + Snippet *pSnippet, + int iColumn, + const char *zDoc, + int nDoc +){ + const sqlite3_tokenizer_module *pTModule; /* The tokenizer module */ + sqlite3_tokenizer *pTokenizer; /* The specific tokenizer */ + sqlite3_tokenizer_cursor *pTCursor; /* Tokenizer cursor */ + fulltext_vtab *pVtab; /* The full text index */ + int nColumn; /* Number of columns in the index */ + const QueryTerm *aTerm; /* Query string terms */ + int nTerm; /* Number of query string terms */ + int i, j; /* Loop counters */ + int rc; /* Return code */ + unsigned int match, prevMatch; /* Phrase search bitmasks */ + const char *zToken; /* Next token from the tokenizer */ + int nToken; /* Size of zToken */ + int iBegin, iEnd, iPos; /* Offsets of beginning and end */ + + /* The following variables keep a circular buffer of the last + ** few tokens */ + unsigned int iRotor = 0; /* Index of current token */ + int iRotorBegin[FTS3_ROTOR_SZ]; /* Beginning offset of token */ + int iRotorLen[FTS3_ROTOR_SZ]; /* Length of token */ + + pVtab = pQuery->pFts; + nColumn = pVtab->nColumn; + pTokenizer = pVtab->pTokenizer; + pTModule = pTokenizer->pModule; + rc = pTModule->xOpen(pTokenizer, zDoc, nDoc, &pTCursor); + if( rc ) return; + pTCursor->pTokenizer = pTokenizer; + aTerm = pQuery->pTerms; + nTerm = pQuery->nTerms; + if( nTerm>=FTS3_ROTOR_SZ ){ + nTerm = FTS3_ROTOR_SZ - 1; + } + prevMatch = 0; + while(1){ + rc = pTModule->xNext(pTCursor, &zToken, &nToken, &iBegin, &iEnd, &iPos); + if( rc ) break; + iRotorBegin[iRotor&FTS3_ROTOR_MASK] = iBegin; + iRotorLen[iRotor&FTS3_ROTOR_MASK] = iEnd-iBegin; + match = 0; + for(i=0; i=0 && iColnToken ) continue; + if( !aTerm[i].isPrefix && aTerm[i].nTerm1 && (prevMatch & (1<=0; j--){ + int k = (iRotor-j) & FTS3_ROTOR_MASK; + snippetAppendMatch(pSnippet, iColumn, i-j, + iRotorBegin[k], iRotorLen[k]); + } + } + } + prevMatch = match<<1; + iRotor++; + } + pTModule->xClose(pTCursor); +} + + +/* +** Compute all offsets for the current row of the query. +** If the offsets have already been computed, this routine is a no-op. +*/ +static void snippetAllOffsets(fulltext_cursor *p){ + int nColumn; + int iColumn, i; + int iFirst, iLast; + fulltext_vtab *pFts; + + if( p->snippet.nMatch ) return; + if( p->q.nTerms==0 ) return; + pFts = p->q.pFts; + nColumn = pFts->nColumn; + iColumn = (p->iCursorType - QUERY_FULLTEXT); + if( iColumn<0 || iColumn>=nColumn ){ + iFirst = 0; + iLast = nColumn-1; + }else{ + iFirst = iColumn; + iLast = iColumn; + } + for(i=iFirst; i<=iLast; i++){ + const char *zDoc; + int nDoc; + zDoc = (const char*)sqlite3_column_text(p->pStmt, i+1); + nDoc = sqlite3_column_bytes(p->pStmt, i+1); + snippetOffsetsOfColumn(&p->q, &p->snippet, i, zDoc, nDoc); + } +} + +/* +** Convert the information in the aMatch[] array of the snippet +** into the string zOffset[0..nOffset-1]. +*/ +static void snippetOffsetText(Snippet *p){ + int i; + int cnt = 0; + StringBuffer sb; + char zBuf[200]; + if( p->zOffset ) return; + initStringBuffer(&sb); + for(i=0; inMatch; i++){ + struct snippetMatch *pMatch = &p->aMatch[i]; + zBuf[0] = ' '; + sprintf(&zBuf[cnt>0], "%d %d %d %d", pMatch->iCol, + pMatch->iTerm, pMatch->iStart, pMatch->nByte); + append(&sb, zBuf); + cnt++; + } + p->zOffset = stringBufferData(&sb); + p->nOffset = stringBufferLength(&sb); +} + +/* +** zDoc[0..nDoc-1] is phrase of text. aMatch[0..nMatch-1] are a set +** of matching words some of which might be in zDoc. zDoc is column +** number iCol. 
+** +** iBreak is suggested spot in zDoc where we could begin or end an +** excerpt. Return a value similar to iBreak but possibly adjusted +** to be a little left or right so that the break point is better. +*/ +static int wordBoundary( + int iBreak, /* The suggested break point */ + const char *zDoc, /* Document text */ + int nDoc, /* Number of bytes in zDoc[] */ + struct snippetMatch *aMatch, /* Matching words */ + int nMatch, /* Number of entries in aMatch[] */ + int iCol /* The column number for zDoc[] */ +){ + int i; + if( iBreak<=10 ){ + return 0; + } + if( iBreak>=nDoc-10 ){ + return nDoc; + } + for(i=0; i0 && aMatch[i-1].iStart+aMatch[i-1].nByte>=iBreak ){ + return aMatch[i-1].iStart; + } + } + for(i=1; i<=10; i++){ + if( safe_isspace(zDoc[iBreak-i]) ){ + return iBreak - i + 1; + } + if( safe_isspace(zDoc[iBreak+i]) ){ + return iBreak + i + 1; + } + } + return iBreak; +} + + + +/* +** Allowed values for Snippet.aMatch[].snStatus +*/ +#define SNIPPET_IGNORE 0 /* It is ok to omit this match from the snippet */ +#define SNIPPET_DESIRED 1 /* We want to include this match in the snippet */ + +/* +** Generate the text of a snippet. +*/ +static void snippetText( + fulltext_cursor *pCursor, /* The cursor we need the snippet for */ + const char *zStartMark, /* Markup to appear before each match */ + const char *zEndMark, /* Markup to appear after each match */ + const char *zEllipsis /* Ellipsis mark */ +){ + int i, j; + struct snippetMatch *aMatch; + int nMatch; + int nDesired; + StringBuffer sb; + int tailCol; + int tailOffset; + int iCol; + int nDoc; + const char *zDoc; + int iStart, iEnd; + int tailEllipsis = 0; + int iMatch; + + + free(pCursor->snippet.zSnippet); + pCursor->snippet.zSnippet = 0; + aMatch = pCursor->snippet.aMatch; + nMatch = pCursor->snippet.nMatch; + initStringBuffer(&sb); + + for(i=0; iq.nTerms; i++){ + for(j=0; j0; i++){ + if( aMatch[i].snStatus!=SNIPPET_DESIRED ) continue; + nDesired--; + iCol = aMatch[i].iCol; + zDoc = (const char*)sqlite3_column_text(pCursor->pStmt, iCol+1); + nDoc = sqlite3_column_bytes(pCursor->pStmt, iCol+1); + iStart = aMatch[i].iStart - 40; + iStart = wordBoundary(iStart, zDoc, nDoc, aMatch, nMatch, iCol); + if( iStart<=10 ){ + iStart = 0; + } + if( iCol==tailCol && iStart<=tailOffset+20 ){ + iStart = tailOffset; + } + if( (iCol!=tailCol && tailCol>=0) || iStart!=tailOffset ){ + trimWhiteSpace(&sb); + appendWhiteSpace(&sb); + append(&sb, zEllipsis); + appendWhiteSpace(&sb); + } + iEnd = aMatch[i].iStart + aMatch[i].nByte + 40; + iEnd = wordBoundary(iEnd, zDoc, nDoc, aMatch, nMatch, iCol); + if( iEnd>=nDoc-10 ){ + iEnd = nDoc; + tailEllipsis = 0; + }else{ + tailEllipsis = 1; + } + while( iMatchsnippet.zSnippet = stringBufferData(&sb); + pCursor->snippet.nSnippet = stringBufferLength(&sb); +} + + +/* +** Close the cursor. For additional information see the documentation +** on the xClose method of the virtual table interface. +*/ +static int fulltextClose(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + TRACE(("FTS3 Close %p\n", c)); + sqlite3_finalize(c->pStmt); + queryClear(&c->q); + snippetClear(&c->snippet); + if( c->result.nData!=0 ) dlrDestroy(&c->reader); + dataBufferDestroy(&c->result); + free(c); + return SQLITE_OK; +} + +static int fulltextNext(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + int rc; + + TRACE(("FTS3 Next %p\n", pCursor)); + snippetClear(&c->snippet); + if( c->iCursorType < QUERY_FULLTEXT ){ + /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. 
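+    ** A possible sketch of retrying on SQLITE_BUSY (illustrative only; the
+    ** code below simply returns the error, and the nRetry bound here is
+    ** arbitrary):
+    **
+    **   int nRetry = 0;
+    **   do{
+    **     rc = sqlite3_step(c->pStmt);
+    **   }while( rc==SQLITE_BUSY && (++nRetry)<10 );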
*/ + rc = sqlite3_step(c->pStmt); + switch( rc ){ + case SQLITE_ROW: + c->eof = 0; + return SQLITE_OK; + case SQLITE_DONE: + c->eof = 1; + return SQLITE_OK; + default: + c->eof = 1; + return rc; + } + } else { /* full-text query */ + rc = sqlite3_reset(c->pStmt); + if( rc!=SQLITE_OK ) return rc; + + if( c->result.nData==0 || dlrAtEnd(&c->reader) ){ + c->eof = 1; + return SQLITE_OK; + } + rc = sqlite3_bind_int64(c->pStmt, 1, dlrDocid(&c->reader)); + dlrStep(&c->reader); + if( rc!=SQLITE_OK ) return rc; + /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ + rc = sqlite3_step(c->pStmt); + if( rc==SQLITE_ROW ){ /* the case we expect */ + c->eof = 0; + return SQLITE_OK; + } + /* an error occurred; abort */ + return rc==SQLITE_DONE ? SQLITE_ERROR : rc; + } +} + + +/* TODO(shess) If we pushed LeafReader to the top of the file, or to +** another file, term_select() could be pushed above +** docListOfTerm(). +*/ +static int termSelect(fulltext_vtab *v, int iColumn, + const char *pTerm, int nTerm, int isPrefix, + DocListType iType, DataBuffer *out); + +/* Return a DocList corresponding to the query term *pTerm. If *pTerm +** is the first term of a phrase query, go ahead and evaluate the phrase +** query and return the doclist for the entire phrase query. +** +** The resulting DL_DOCIDS doclist is stored in pResult, which is +** overwritten. +*/ +static int docListOfTerm( + fulltext_vtab *v, /* The full text index */ + int iColumn, /* column to restrict to. No restriction if >=nColumn */ + QueryTerm *pQTerm, /* Term we are looking for, or 1st term of a phrase */ + DataBuffer *pResult /* Write the result here */ +){ + DataBuffer left, right, new; + int i, rc; + + /* No phrase search if no position info. */ + assert( pQTerm->nPhrase==0 || DL_DEFAULT!=DL_DOCIDS ); + + /* This code should never be called with buffered updates. */ + assert( v->nPendingData<0 ); + + dataBufferInit(&left, 0); + rc = termSelect(v, iColumn, pQTerm->pTerm, pQTerm->nTerm, pQTerm->isPrefix, + 0nPhrase ? DL_POSITIONS : DL_DOCIDS, &left); + if( rc ) return rc; + for(i=1; i<=pQTerm->nPhrase && left.nData>0; i++){ + dataBufferInit(&right, 0); + rc = termSelect(v, iColumn, pQTerm[i].pTerm, pQTerm[i].nTerm, + pQTerm[i].isPrefix, DL_POSITIONS, &right); + if( rc ){ + dataBufferDestroy(&left); + return rc; + } + dataBufferInit(&new, 0); + docListPhraseMerge(left.pData, left.nData, right.pData, right.nData, + inPhrase ? DL_POSITIONS : DL_DOCIDS, &new); + dataBufferDestroy(&left); + dataBufferDestroy(&right); + left = new; + } + *pResult = left; + return SQLITE_OK; +} + +/* Add a new term pTerm[0..nTerm-1] to the query *q. +*/ +static void queryAdd(Query *q, const char *pTerm, int nTerm){ + QueryTerm *t; + ++q->nTerms; + q->pTerms = realloc(q->pTerms, q->nTerms * sizeof(q->pTerms[0])); + if( q->pTerms==0 ){ + q->nTerms = 0; + return; + } + t = &q->pTerms[q->nTerms - 1]; + CLEAR(t); + t->pTerm = malloc(nTerm+1); + memcpy(t->pTerm, pTerm, nTerm); + t->pTerm[nTerm] = 0; + t->nTerm = nTerm; + t->isOr = q->nextIsOr; + t->isPrefix = 0; + q->nextIsOr = 0; + t->iColumn = q->nextColumn; + q->nextColumn = q->dfltColumn; +} + +/* +** Check to see if the string zToken[0...nToken-1] matches any +** column name in the virtual table. If it does, +** return the zero-indexed column number. If not, return -1. 
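+**
+** For example, for a table declared as fts3(subject, body):
+**
+**   checkColumnSpecifier(v, "subject", 7)   == 0
+**   checkColumnSpecifier(v, "body", 4)      == 1
+**   checkColumnSpecifier(v, "title", 5)     == -1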
+*/ +static int checkColumnSpecifier( + fulltext_vtab *pVtab, /* The virtual table */ + const char *zToken, /* Text of the token */ + int nToken /* Number of characters in the token */ +){ + int i; + for(i=0; inColumn; i++){ + if( memcmp(pVtab->azColumn[i], zToken, nToken)==0 + && pVtab->azColumn[i][nToken]==0 ){ + return i; + } + } + return -1; +} + +/* +** Parse the text at pSegment[0..nSegment-1]. Add additional terms +** to the query being assemblied in pQuery. +** +** inPhrase is true if pSegment[0..nSegement-1] is contained within +** double-quotes. If inPhrase is true, then the first term +** is marked with the number of terms in the phrase less one and +** OR and "-" syntax is ignored. If inPhrase is false, then every +** term found is marked with nPhrase=0 and OR and "-" syntax is significant. +*/ +static int tokenizeSegment( + sqlite3_tokenizer *pTokenizer, /* The tokenizer to use */ + const char *pSegment, int nSegment, /* Query expression being parsed */ + int inPhrase, /* True if within "..." */ + Query *pQuery /* Append results here */ +){ + const sqlite3_tokenizer_module *pModule = pTokenizer->pModule; + sqlite3_tokenizer_cursor *pCursor; + int firstIndex = pQuery->nTerms; + int iCol; + int nTerm = 1; + + int rc = pModule->xOpen(pTokenizer, pSegment, nSegment, &pCursor); + if( rc!=SQLITE_OK ) return rc; + pCursor->pTokenizer = pTokenizer; + + while( 1 ){ + const char *pToken; + int nToken, iBegin, iEnd, iPos; + + rc = pModule->xNext(pCursor, + &pToken, &nToken, + &iBegin, &iEnd, &iPos); + if( rc!=SQLITE_OK ) break; + if( !inPhrase && + pSegment[iEnd]==':' && + (iCol = checkColumnSpecifier(pQuery->pFts, pToken, nToken))>=0 ){ + pQuery->nextColumn = iCol; + continue; + } + if( !inPhrase && pQuery->nTerms>0 && nToken==2 + && pSegment[iBegin]=='O' && pSegment[iBegin+1]=='R' ){ + pQuery->nextIsOr = 1; + continue; + } + queryAdd(pQuery, pToken, nToken); + if( !inPhrase && iBegin>0 && pSegment[iBegin-1]=='-' ){ + pQuery->pTerms[pQuery->nTerms-1].isNot = 1; + } + if( iEndpTerms[pQuery->nTerms-1].isPrefix = 1; + } + pQuery->pTerms[pQuery->nTerms-1].iPhrase = nTerm; + if( inPhrase ){ + nTerm++; + } + } + + if( inPhrase && pQuery->nTerms>firstIndex ){ + pQuery->pTerms[firstIndex].nPhrase = pQuery->nTerms - firstIndex - 1; + } + + return pModule->xClose(pCursor); +} + +/* Parse a query string, yielding a Query object pQuery. +** +** The calling function will need to queryClear() to clean up +** the dynamically allocated memory held by pQuery. +*/ +static int parseQuery( + fulltext_vtab *v, /* The fulltext index */ + const char *zInput, /* Input text of the query string */ + int nInput, /* Size of the input text */ + int dfltColumn, /* Default column of the index to match against */ + Query *pQuery /* Write the parse results here. */ +){ + int iInput, inPhrase = 0; + + if( zInput==0 ) nInput = 0; + if( nInput<0 ) nInput = strlen(zInput); + pQuery->nTerms = 0; + pQuery->pTerms = NULL; + pQuery->nextIsOr = 0; + pQuery->nextColumn = dfltColumn; + pQuery->dfltColumn = dfltColumn; + pQuery->pFts = v; + + for(iInput=0; iInputiInput ){ + tokenizeSegment(v->pTokenizer, zInput+iInput, i-iInput, inPhrase, + pQuery); + } + iInput = i; + if( i=nColumn +** they are allowed to match against any column. 
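+**
+** As an illustration of the MATCH syntax handled by parseQuery() above:
+**
+**   subject:urgent report*      -- "urgent" is restricted to the subject
+**                                  column; "report" has isPrefix set.
+**   "full text" -draft          -- a two-word phrase (its first term gets
+**                                  nPhrase==1) followed by a term with
+**                                  isNot set.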
+*/ +static int fulltextQuery( + fulltext_vtab *v, /* The full text index */ + int iColumn, /* Match against this column by default */ + const char *zInput, /* The query string */ + int nInput, /* Number of bytes in zInput[] */ + DataBuffer *pResult, /* Write the result doclist here */ + Query *pQuery /* Put parsed query string here */ +){ + int i, iNext, rc; + DataBuffer left, right, or, new; + int nNot = 0; + QueryTerm *aTerm; + + /* TODO(shess) Instead of flushing pendingTerms, we could query for + ** the relevant term and merge the doclist into what we receive from + ** the database. Wait and see if this is a common issue, first. + ** + ** A good reason not to flush is to not generate update-related + ** error codes from here. + */ + + /* Flush any buffered updates before executing the query. */ + rc = flushPendingTerms(v); + if( rc!=SQLITE_OK ) return rc; + + /* TODO(shess) I think that the queryClear() calls below are not + ** necessary, because fulltextClose() already clears the query. + */ + rc = parseQuery(v, zInput, nInput, iColumn, pQuery); + if( rc!=SQLITE_OK ) return rc; + + /* Empty or NULL queries return no results. */ + if( pQuery->nTerms==0 ){ + dataBufferInit(pResult, 0); + return SQLITE_OK; + } + + /* Merge AND terms. */ + /* TODO(shess) I think we can early-exit if( i>nNot && left.nData==0 ). */ + aTerm = pQuery->pTerms; + for(i = 0; inTerms; i=iNext){ + if( aTerm[i].isNot ){ + /* Handle all NOT terms in a separate pass */ + nNot++; + iNext = i + aTerm[i].nPhrase+1; + continue; + } + iNext = i + aTerm[i].nPhrase + 1; + rc = docListOfTerm(v, aTerm[i].iColumn, &aTerm[i], &right); + if( rc ){ + if( i!=nNot ) dataBufferDestroy(&left); + queryClear(pQuery); + return rc; + } + while( iNextnTerms && aTerm[iNext].isOr ){ + rc = docListOfTerm(v, aTerm[iNext].iColumn, &aTerm[iNext], &or); + iNext += aTerm[iNext].nPhrase + 1; + if( rc ){ + if( i!=nNot ) dataBufferDestroy(&left); + dataBufferDestroy(&right); + queryClear(pQuery); + return rc; + } + dataBufferInit(&new, 0); + docListOrMerge(right.pData, right.nData, or.pData, or.nData, &new); + dataBufferDestroy(&right); + dataBufferDestroy(&or); + right = new; + } + if( i==nNot ){ /* first term processed. */ + left = right; + }else{ + dataBufferInit(&new, 0); + docListAndMerge(left.pData, left.nData, right.pData, right.nData, &new); + dataBufferDestroy(&right); + dataBufferDestroy(&left); + left = new; + } + } + + if( nNot==pQuery->nTerms ){ + /* We do not yet know how to handle a query of only NOT terms */ + return SQLITE_ERROR; + } + + /* Do the EXCEPT terms */ + for(i=0; inTerms; i += aTerm[i].nPhrase + 1){ + if( !aTerm[i].isNot ) continue; + rc = docListOfTerm(v, aTerm[i].iColumn, &aTerm[i], &right); + if( rc ){ + queryClear(pQuery); + dataBufferDestroy(&left); + return rc; + } + dataBufferInit(&new, 0); + docListExceptMerge(left.pData, left.nData, right.pData, right.nData, &new); + dataBufferDestroy(&right); + dataBufferDestroy(&left); + left = new; + } + + *pResult = left; + return rc; +} + +/* +** This is the xFilter interface for the virtual table. See +** the virtual table xFilter method documentation for additional +** information. +** +** If idxNum==QUERY_GENERIC then do a full table scan against +** the %_content table. +** +** If idxNum==QUERY_DOCID then do a docid lookup for a single entry +** in the %_content table. +** +** If idxNum>=QUERY_FULLTEXT then use the full text index. The +** column on the left-hand side of the MATCH operator is column +** number idxNum-QUERY_FULLTEXT, 0 indexed. 
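+** For example, with a table declared as fts3(subject, body), the query
+**
+**   SELECT docid FROM email WHERE body MATCH 'sqlite';
+**
+** reaches this routine with idxNum==QUERY_FULLTEXT+1, and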
argv[0] is the right-hand +** side of the MATCH operator. +*/ +/* TODO(shess) Upgrade the cursor initialization and destruction to +** account for fulltextFilter() being called multiple times on the +** same cursor. The current solution is very fragile. Apply fix to +** fts3 as appropriate. +*/ +static int fulltextFilter( + sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */ + int idxNum, const char *idxStr, /* Which indexing scheme to use */ + int argc, sqlite3_value **argv /* Arguments for the indexing scheme */ +){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + fulltext_vtab *v = cursor_vtab(c); + int rc; + StringBuffer sb; + + TRACE(("FTS3 Filter %p\n",pCursor)); + + initStringBuffer(&sb); + append(&sb, "SELECT docid, "); + appendList(&sb, v->nColumn, v->azContentColumn); + append(&sb, " FROM %_content"); + if( idxNum!=QUERY_GENERIC ) append(&sb, " WHERE docid = ?"); + sqlite3_finalize(c->pStmt); + rc = sql_prepare(v->db, v->zDb, v->zName, &c->pStmt, stringBufferData(&sb)); + stringBufferDestroy(&sb); + if( rc!=SQLITE_OK ) return rc; + + c->iCursorType = idxNum; + switch( idxNum ){ + case QUERY_GENERIC: + break; + + case QUERY_DOCID: + rc = sqlite3_bind_int64(c->pStmt, 1, sqlite3_value_int64(argv[0])); + if( rc!=SQLITE_OK ) return rc; + break; + + default: /* full-text search */ + { + const char *zQuery = (const char *)sqlite3_value_text(argv[0]); + assert( idxNum<=QUERY_FULLTEXT+v->nColumn); + assert( argc==1 ); + queryClear(&c->q); + if( c->result.nData!=0 ){ + /* This case happens if the same cursor is used repeatedly. */ + dlrDestroy(&c->reader); + dataBufferReset(&c->result); + }else{ + dataBufferInit(&c->result, 0); + } + rc = fulltextQuery(v, idxNum-QUERY_FULLTEXT, zQuery, -1, &c->result, &c->q); + if( rc!=SQLITE_OK ) return rc; + if( c->result.nData!=0 ){ + dlrInit(&c->reader, DL_DOCIDS, c->result.pData, c->result.nData); + } + break; + } + } + + return fulltextNext(pCursor); +} + +/* This is the xEof method of the virtual table. The SQLite core +** calls this routine to find out if it has reached the end of +** a query's results set. +*/ +static int fulltextEof(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + return c->eof; +} + +/* This is the xColumn method of the virtual table. The SQLite +** core calls this method during a query when it needs the value +** of a column from the virtual table. This method needs to use +** one of the sqlite3_result_*() routines to store the requested +** value back in the pContext. +*/ +static int fulltextColumn(sqlite3_vtab_cursor *pCursor, + sqlite3_context *pContext, int idxCol){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + fulltext_vtab *v = cursor_vtab(c); + + if( idxColnColumn ){ + sqlite3_value *pVal = sqlite3_column_value(c->pStmt, idxCol+1); + sqlite3_result_value(pContext, pVal); + }else if( idxCol==v->nColumn ){ + /* The extra column whose name is the same as the table. + ** Return a blob which is a pointer to the cursor + */ + sqlite3_result_blob(pContext, &c, sizeof(c), SQLITE_TRANSIENT); + }else if( idxCol==v->nColumn+1 ){ + /* The docid column, which is an alias for rowid. */ + sqlite3_value *pVal = sqlite3_column_value(c->pStmt, 0); + sqlite3_result_value(pContext, pVal); + } + return SQLITE_OK; +} + +/* This is the xRowid method. The SQLite core calls this routine to +** retrieve the rowid for the current row of the result set. fts3 +** exposes %_content.docid as the rowid for the virtual table. The +** rowid should be written to *pRowid. 
+*/ +static int fulltextRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + + *pRowid = sqlite3_column_int64(c->pStmt, 0); + return SQLITE_OK; +} + +/* Add all terms in [zText] to pendingTerms table. If [iColumn] > 0, +** we also store positions and offsets in the hash table using that +** column number. +*/ +static int buildTerms(fulltext_vtab *v, sqlite_int64 iDocid, + const char *zText, int iColumn){ + sqlite3_tokenizer *pTokenizer = v->pTokenizer; + sqlite3_tokenizer_cursor *pCursor; + const char *pToken; + int nTokenBytes; + int iStartOffset, iEndOffset, iPosition; + int rc; + + rc = pTokenizer->pModule->xOpen(pTokenizer, zText, -1, &pCursor); + if( rc!=SQLITE_OK ) return rc; + + pCursor->pTokenizer = pTokenizer; + while( SQLITE_OK==pTokenizer->pModule->xNext(pCursor, + &pToken, &nTokenBytes, + &iStartOffset, &iEndOffset, + &iPosition) ){ + DLCollector *p; + int nData; /* Size of doclist before our update. */ + + /* Positions can't be negative; we use -1 as a terminator internally. */ + if( iPosition<0 ){ + pTokenizer->pModule->xClose(pCursor); + return SQLITE_ERROR; + } + + p = fts3HashFind(&v->pendingTerms, pToken, nTokenBytes); + if( p==NULL ){ + nData = 0; + p = dlcNew(iDocid, DL_DEFAULT); + fts3HashInsert(&v->pendingTerms, pToken, nTokenBytes, p); + + /* Overhead for our hash table entry, the key, and the value. */ + v->nPendingData += sizeof(struct fts3HashElem)+sizeof(*p)+nTokenBytes; + }else{ + nData = p->b.nData; + if( p->dlw.iPrevDocid!=iDocid ) dlcNext(p, iDocid); + } + if( iColumn>=0 ){ + dlcAddPos(p, iColumn, iPosition, iStartOffset, iEndOffset); + } + + /* Accumulate data added by dlcNew or dlcNext, and dlcAddPos. */ + v->nPendingData += p->b.nData-nData; + } + + /* TODO(shess) Check return? Should this be able to cause errors at + ** this point? Actually, same question about sqlite3_finalize(), + ** though one could argue that failure there means that the data is + ** not durable. *ponder* + */ + pTokenizer->pModule->xClose(pCursor); + return rc; +} + +/* Add doclists for all terms in [pValues] to pendingTerms table. */ +static int insertTerms(fulltext_vtab *v, sqlite_int64 iDocid, + sqlite3_value **pValues){ + int i; + for(i = 0; i < v->nColumn ; ++i){ + char *zText = (char*)sqlite3_value_text(pValues[i]); + int rc = buildTerms(v, iDocid, zText, i); + if( rc!=SQLITE_OK ) return rc; + } + return SQLITE_OK; +} + +/* Add empty doclists for all terms in the given row's content to +** pendingTerms. +*/ +static int deleteTerms(fulltext_vtab *v, sqlite_int64 iDocid){ + const char **pValues; + int i, rc; + + /* TODO(shess) Should we allow such tables at all? */ + if( DL_DEFAULT==DL_DOCIDS ) return SQLITE_ERROR; + + rc = content_select(v, iDocid, &pValues); + if( rc!=SQLITE_OK ) return rc; + + for(i = 0 ; i < v->nColumn; ++i) { + rc = buildTerms(v, iDocid, pValues[i], -1); + if( rc!=SQLITE_OK ) break; + } + + freeStringArray(v->nColumn, pValues); + return SQLITE_OK; +} + +/* TODO(shess) Refactor the code to remove this forward decl. */ +static int initPendingTerms(fulltext_vtab *v, sqlite_int64 iDocid); + +/* Insert a row into the %_content table; set *piDocid to be the ID of the +** new row. Add doclists for terms to pendingTerms. 
+*/ +static int index_insert(fulltext_vtab *v, sqlite3_value *pRequestDocid, + sqlite3_value **pValues, sqlite_int64 *piDocid){ + int rc; + + rc = content_insert(v, pRequestDocid, pValues); /* execute an SQL INSERT */ + if( rc!=SQLITE_OK ) return rc; + + /* docid column is an alias for rowid. */ + *piDocid = sqlite3_last_insert_rowid(v->db); + rc = initPendingTerms(v, *piDocid); + if( rc!=SQLITE_OK ) return rc; + + return insertTerms(v, *piDocid, pValues); +} + +/* Delete a row from the %_content table; add empty doclists for terms +** to pendingTerms. +*/ +static int index_delete(fulltext_vtab *v, sqlite_int64 iRow){ + int rc = initPendingTerms(v, iRow); + if( rc!=SQLITE_OK ) return rc; + + rc = deleteTerms(v, iRow); + if( rc!=SQLITE_OK ) return rc; + + return content_delete(v, iRow); /* execute an SQL DELETE */ +} + +/* Update a row in the %_content table; add delete doclists to +** pendingTerms for old terms not in the new data, add insert doclists +** to pendingTerms for terms in the new data. +*/ +static int index_update(fulltext_vtab *v, sqlite_int64 iRow, + sqlite3_value **pValues){ + int rc = initPendingTerms(v, iRow); + if( rc!=SQLITE_OK ) return rc; + + /* Generate an empty doclist for each term that previously appeared in this + * row. */ + rc = deleteTerms(v, iRow); + if( rc!=SQLITE_OK ) return rc; + + rc = content_update(v, pValues, iRow); /* execute an SQL UPDATE */ + if( rc!=SQLITE_OK ) return rc; + + /* Now add positions for terms which appear in the updated row. */ + return insertTerms(v, iRow, pValues); +} + +/*******************************************************************/ +/* InteriorWriter is used to collect terms and block references into +** interior nodes in %_segments. See commentary at top of file for +** format. +*/ + +/* How large interior nodes can grow. */ +#define INTERIOR_MAX 2048 + +/* Minimum number of terms per interior node (except the root). This +** prevents large terms from making the tree too skinny - must be >0 +** so that the tree always makes progress. Note that the min tree +** fanout will be INTERIOR_MIN_TERMS+1. +*/ +#define INTERIOR_MIN_TERMS 7 +#if INTERIOR_MIN_TERMS<1 +# error INTERIOR_MIN_TERMS must be greater than 0. +#endif + +/* ROOT_MAX controls how much data is stored inline in the segment +** directory. +*/ +/* TODO(shess) Push ROOT_MAX down to whoever is writing things. It's +** only here so that interiorWriterRootInfo() and leafWriterRootInfo() +** can both see it, but if the caller passed it in, we wouldn't even +** need a define. +*/ +#define ROOT_MAX 1024 +#if ROOT_MAXterm, 0); + dataBufferReplace(&block->term, pTerm, nTerm); + + n = putVarint(c, iHeight); + n += putVarint(c+n, iChildBlock); + dataBufferInit(&block->data, INTERIOR_MAX); + dataBufferReplace(&block->data, c, n); + + return block; +} + +#ifndef NDEBUG +/* Verify that the data is readable as an interior node. */ +static void interiorBlockValidate(InteriorBlock *pBlock){ + const char *pData = pBlock->data.pData; + int nData = pBlock->data.nData; + int n, iDummy; + sqlite_int64 iBlockid; + + assert( nData>0 ); + assert( pData!=0 ); + assert( pData+nData>pData ); + + /* Must lead with height of node as a varint(n), n>0 */ + n = getVarint32(pData, &iDummy); + assert( n>0 ); + assert( iDummy>0 ); + assert( n0 ); + assert( n<=nData ); + pData += n; + nData -= n; + + /* Zero or more terms of positive length */ + if( nData!=0 ){ + /* First term is not delta-encoded. 
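+    ** It is stored as varint(nTerm) followed by the term bytes; each later
+    ** term is stored as varint(nPrefix), varint(nSuffix) and the suffix
+    ** bytes, exactly as the checks below read them back.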
*/ + n = getVarint32(pData, &iDummy); + assert( n>0 ); + assert( iDummy>0 ); + assert( n+iDummy>0); + assert( n+iDummy<=nData ); + pData += n+iDummy; + nData -= n+iDummy; + + /* Following terms delta-encoded. */ + while( nData!=0 ){ + /* Length of shared prefix. */ + n = getVarint32(pData, &iDummy); + assert( n>0 ); + assert( iDummy>=0 ); + assert( n0 ); + assert( iDummy>0 ); + assert( n+iDummy>0); + assert( n+iDummy<=nData ); + pData += n+iDummy; + nData -= n+iDummy; + } + } +} +#define ASSERT_VALID_INTERIOR_BLOCK(x) interiorBlockValidate(x) +#else +#define ASSERT_VALID_INTERIOR_BLOCK(x) assert( 1 ) +#endif + +typedef struct InteriorWriter { + int iHeight; /* from 0 at leaves. */ + InteriorBlock *first, *last; + struct InteriorWriter *parentWriter; + + DataBuffer term; /* Last term written to block "last". */ + sqlite_int64 iOpeningChildBlock; /* First child block in block "last". */ +#ifndef NDEBUG + sqlite_int64 iLastChildBlock; /* for consistency checks. */ +#endif +} InteriorWriter; + +/* Initialize an interior node where pTerm[nTerm] marks the leftmost +** term in the tree. iChildBlock is the leftmost child block at the +** next level down the tree. +*/ +static void interiorWriterInit(int iHeight, const char *pTerm, int nTerm, + sqlite_int64 iChildBlock, + InteriorWriter *pWriter){ + InteriorBlock *block; + assert( iHeight>0 ); + CLEAR(pWriter); + + pWriter->iHeight = iHeight; + pWriter->iOpeningChildBlock = iChildBlock; +#ifndef NDEBUG + pWriter->iLastChildBlock = iChildBlock; +#endif + block = interiorBlockNew(iHeight, iChildBlock, pTerm, nTerm); + pWriter->last = pWriter->first = block; + ASSERT_VALID_INTERIOR_BLOCK(pWriter->last); + dataBufferInit(&pWriter->term, 0); +} + +/* Append the child node rooted at iChildBlock to the interior node, +** with pTerm[nTerm] as the leftmost term in iChildBlock's subtree. +*/ +static void interiorWriterAppend(InteriorWriter *pWriter, + const char *pTerm, int nTerm, + sqlite_int64 iChildBlock){ + char c[VARINT_MAX+VARINT_MAX]; + int n, nPrefix = 0; + + ASSERT_VALID_INTERIOR_BLOCK(pWriter->last); + + /* The first term written into an interior node is actually + ** associated with the second child added (the first child was added + ** in interiorWriterInit, or in the if clause at the bottom of this + ** function). That term gets encoded straight up, with nPrefix left + ** at 0. + */ + if( pWriter->term.nData==0 ){ + n = putVarint(c, nTerm); + }else{ + while( nPrefixterm.nData && + pTerm[nPrefix]==pWriter->term.pData[nPrefix] ){ + nPrefix++; + } + + n = putVarint(c, nPrefix); + n += putVarint(c+n, nTerm-nPrefix); + } + +#ifndef NDEBUG + pWriter->iLastChildBlock++; +#endif + assert( pWriter->iLastChildBlock==iChildBlock ); + + /* Overflow to a new block if the new term makes the current block + ** too big, and the current block already has enough terms. + */ + if( pWriter->last->data.nData+n+nTerm-nPrefix>INTERIOR_MAX && + iChildBlock-pWriter->iOpeningChildBlock>INTERIOR_MIN_TERMS ){ + pWriter->last->next = interiorBlockNew(pWriter->iHeight, iChildBlock, + pTerm, nTerm); + pWriter->last = pWriter->last->next; + pWriter->iOpeningChildBlock = iChildBlock; + dataBufferReset(&pWriter->term); + }else{ + dataBufferAppend2(&pWriter->last->data, c, n, + pTerm+nPrefix, nTerm-nPrefix); + dataBufferReplace(&pWriter->term, pTerm, nTerm); + } + ASSERT_VALID_INTERIOR_BLOCK(pWriter->last); +} + +/* Free the space used by pWriter, including the linked-list of +** InteriorBlocks, and parentWriter, if present. 
+*/ +static int interiorWriterDestroy(InteriorWriter *pWriter){ + InteriorBlock *block = pWriter->first; + + while( block!=NULL ){ + InteriorBlock *b = block; + block = block->next; + dataBufferDestroy(&b->term); + dataBufferDestroy(&b->data); + free(b); + } + if( pWriter->parentWriter!=NULL ){ + interiorWriterDestroy(pWriter->parentWriter); + free(pWriter->parentWriter); + } + dataBufferDestroy(&pWriter->term); + SCRAMBLE(pWriter); + return SQLITE_OK; +} + +/* If pWriter can fit entirely in ROOT_MAX, return it as the root info +** directly, leaving *piEndBlockid unchanged. Otherwise, flush +** pWriter to %_segments, building a new layer of interior nodes, and +** recursively ask for their root into. +*/ +static int interiorWriterRootInfo(fulltext_vtab *v, InteriorWriter *pWriter, + char **ppRootInfo, int *pnRootInfo, + sqlite_int64 *piEndBlockid){ + InteriorBlock *block = pWriter->first; + sqlite_int64 iBlockid = 0; + int rc; + + /* If we can fit the segment inline */ + if( block==pWriter->last && block->data.nDatadata.pData; + *pnRootInfo = block->data.nData; + return SQLITE_OK; + } + + /* Flush the first block to %_segments, and create a new level of + ** interior node. + */ + ASSERT_VALID_INTERIOR_BLOCK(block); + rc = block_insert(v, block->data.pData, block->data.nData, &iBlockid); + if( rc!=SQLITE_OK ) return rc; + *piEndBlockid = iBlockid; + + pWriter->parentWriter = malloc(sizeof(*pWriter->parentWriter)); + interiorWriterInit(pWriter->iHeight+1, + block->term.pData, block->term.nData, + iBlockid, pWriter->parentWriter); + + /* Flush additional blocks and append to the higher interior + ** node. + */ + for(block=block->next; block!=NULL; block=block->next){ + ASSERT_VALID_INTERIOR_BLOCK(block); + rc = block_insert(v, block->data.pData, block->data.nData, &iBlockid); + if( rc!=SQLITE_OK ) return rc; + *piEndBlockid = iBlockid; + + interiorWriterAppend(pWriter->parentWriter, + block->term.pData, block->term.nData, iBlockid); + } + + /* Parent node gets the chance to be the root. */ + return interiorWriterRootInfo(v, pWriter->parentWriter, + ppRootInfo, pnRootInfo, piEndBlockid); +} + +/****************************************************************/ +/* InteriorReader is used to read off the data from an interior node +** (see comment at top of file for the format). +*/ +typedef struct InteriorReader { + const char *pData; + int nData; + + DataBuffer term; /* previous term, for decoding term delta. */ + + sqlite_int64 iBlockid; +} InteriorReader; + +static void interiorReaderDestroy(InteriorReader *pReader){ + dataBufferDestroy(&pReader->term); + SCRAMBLE(pReader); +} + +/* TODO(shess) The assertions are great, but what if we're in NDEBUG +** and the blob is empty or otherwise contains suspect data? +*/ +static void interiorReaderInit(const char *pData, int nData, + InteriorReader *pReader){ + int n, nTerm; + + /* Require at least the leading flag byte */ + assert( nData>0 ); + assert( pData[0]!='\0' ); + + CLEAR(pReader); + + /* Decode the base blockid, and set the cursor to the first term. */ + n = getVarint(pData+1, &pReader->iBlockid); + assert( 1+n<=nData ); + pReader->pData = pData+1+n; + pReader->nData = nData-(1+n); + + /* A single-child interior node (such as when a leaf node was too + ** large for the segment directory) won't have any terms. + ** Otherwise, decode the first term. 
+ */ + if( pReader->nData==0 ){ + dataBufferInit(&pReader->term, 0); + }else{ + n = getVarint32(pReader->pData, &nTerm); + dataBufferInit(&pReader->term, nTerm); + dataBufferReplace(&pReader->term, pReader->pData+n, nTerm); + assert( n+nTerm<=pReader->nData ); + pReader->pData += n+nTerm; + pReader->nData -= n+nTerm; + } +} + +static int interiorReaderAtEnd(InteriorReader *pReader){ + return pReader->term.nData==0; +} + +static sqlite_int64 interiorReaderCurrentBlockid(InteriorReader *pReader){ + return pReader->iBlockid; +} + +static int interiorReaderTermBytes(InteriorReader *pReader){ + assert( !interiorReaderAtEnd(pReader) ); + return pReader->term.nData; +} +static const char *interiorReaderTerm(InteriorReader *pReader){ + assert( !interiorReaderAtEnd(pReader) ); + return pReader->term.pData; +} + +/* Step forward to the next term in the node. */ +static void interiorReaderStep(InteriorReader *pReader){ + assert( !interiorReaderAtEnd(pReader) ); + + /* If the last term has been read, signal eof, else construct the + ** next term. + */ + if( pReader->nData==0 ){ + dataBufferReset(&pReader->term); + }else{ + int n, nPrefix, nSuffix; + + n = getVarint32(pReader->pData, &nPrefix); + n += getVarint32(pReader->pData+n, &nSuffix); + + /* Truncate the current term and append suffix data. */ + pReader->term.nData = nPrefix; + dataBufferAppend(&pReader->term, pReader->pData+n, nSuffix); + + assert( n+nSuffix<=pReader->nData ); + pReader->pData += n+nSuffix; + pReader->nData -= n+nSuffix; + } + pReader->iBlockid++; +} + +/* Compare the current term to pTerm[nTerm], returning strcmp-style +** results. If isPrefix, equality means equal through nTerm bytes. +*/ +static int interiorReaderTermCmp(InteriorReader *pReader, + const char *pTerm, int nTerm, int isPrefix){ + const char *pReaderTerm = interiorReaderTerm(pReader); + int nReaderTerm = interiorReaderTermBytes(pReader); + int c, n = nReaderTerm0 ) return -1; + if( nTerm>0 ) return 1; + return 0; + } + + c = memcmp(pReaderTerm, pTerm, n); + if( c!=0 ) return c; + if( isPrefix && n==nTerm ) return 0; + return nReaderTerm - nTerm; +} + +/****************************************************************/ +/* LeafWriter is used to collect terms and associated doclist data +** into leaf blocks in %_segments (see top of file for format info). +** Expected usage is: +** +** LeafWriter writer; +** leafWriterInit(0, 0, &writer); +** while( sorted_terms_left_to_process ){ +** // data is doclist data for that term. +** rc = leafWriterStep(v, &writer, pTerm, nTerm, pData, nData); +** if( rc!=SQLITE_OK ) goto err; +** } +** rc = leafWriterFinalize(v, &writer); +**err: +** leafWriterDestroy(&writer); +** return rc; +** +** leafWriterStep() may write a collected leaf out to %_segments. +** leafWriterFinalize() finishes writing any buffered data and stores +** a root node in %_segdir. leafWriterDestroy() frees all buffers and +** InteriorWriters allocated as part of writing this segment. +** +** TODO(shess) Document leafWriterStepMerge(). +*/ + +/* Put terms with data this big in their own block. */ +#define STANDALONE_MIN 1024 + +/* Keep leaf blocks below this size. */ +#define LEAF_MAX 2048 + +typedef struct LeafWriter { + int iLevel; + int idx; + sqlite_int64 iStartBlockid; /* needed to create the root info */ + sqlite_int64 iEndBlockid; /* when we're done writing. 
*/ + + DataBuffer term; /* previous encoded term */ + DataBuffer data; /* encoding buffer */ + + /* bytes of first term in the current node which distinguishes that + ** term from the last term of the previous node. + */ + int nTermDistinct; + + InteriorWriter parentWriter; /* if we overflow */ + int has_parent; +} LeafWriter; + +static void leafWriterInit(int iLevel, int idx, LeafWriter *pWriter){ + CLEAR(pWriter); + pWriter->iLevel = iLevel; + pWriter->idx = idx; + + dataBufferInit(&pWriter->term, 32); + + /* Start out with a reasonably sized block, though it can grow. */ + dataBufferInit(&pWriter->data, LEAF_MAX); +} + +#ifndef NDEBUG +/* Verify that the data is readable as a leaf node. */ +static void leafNodeValidate(const char *pData, int nData){ + int n, iDummy; + + if( nData==0 ) return; + assert( nData>0 ); + assert( pData!=0 ); + assert( pData+nData>pData ); + + /* Must lead with a varint(0) */ + n = getVarint32(pData, &iDummy); + assert( iDummy==0 ); + assert( n>0 ); + assert( n0 ); + assert( iDummy>0 ); + assert( n+iDummy>0 ); + assert( n+iDummy0 ); + assert( iDummy>0 ); + assert( n+iDummy>0 ); + assert( n+iDummy<=nData ); + ASSERT_VALID_DOCLIST(DL_DEFAULT, pData+n, iDummy, NULL); + pData += n+iDummy; + nData -= n+iDummy; + + /* Verify that trailing terms and doclists also are readable. */ + while( nData!=0 ){ + n = getVarint32(pData, &iDummy); + assert( n>0 ); + assert( iDummy>=0 ); + assert( n0 ); + assert( iDummy>0 ); + assert( n+iDummy>0 ); + assert( n+iDummy0 ); + assert( iDummy>0 ); + assert( n+iDummy>0 ); + assert( n+iDummy<=nData ); + ASSERT_VALID_DOCLIST(DL_DEFAULT, pData+n, iDummy, NULL); + pData += n+iDummy; + nData -= n+iDummy; + } +} +#define ASSERT_VALID_LEAF_NODE(p, n) leafNodeValidate(p, n) +#else +#define ASSERT_VALID_LEAF_NODE(p, n) assert( 1 ) +#endif + +/* Flush the current leaf node to %_segments, and adding the resulting +** blockid and the starting term to the interior node which will +** contain it. +*/ +static int leafWriterInternalFlush(fulltext_vtab *v, LeafWriter *pWriter, + int iData, int nData){ + sqlite_int64 iBlockid = 0; + const char *pStartingTerm; + int nStartingTerm, rc, n; + + /* Must have the leading varint(0) flag, plus at least some + ** valid-looking data. + */ + assert( nData>2 ); + assert( iData>=0 ); + assert( iData+nData<=pWriter->data.nData ); + ASSERT_VALID_LEAF_NODE(pWriter->data.pData+iData, nData); + + rc = block_insert(v, pWriter->data.pData+iData, nData, &iBlockid); + if( rc!=SQLITE_OK ) return rc; + assert( iBlockid!=0 ); + + /* Reconstruct the first term in the leaf for purposes of building + ** the interior node. + */ + n = getVarint32(pWriter->data.pData+iData+1, &nStartingTerm); + pStartingTerm = pWriter->data.pData+iData+1+n; + assert( pWriter->data.nData>iData+1+n+nStartingTerm ); + assert( pWriter->nTermDistinct>0 ); + assert( pWriter->nTermDistinct<=nStartingTerm ); + nStartingTerm = pWriter->nTermDistinct; + + if( pWriter->has_parent ){ + interiorWriterAppend(&pWriter->parentWriter, + pStartingTerm, nStartingTerm, iBlockid); + }else{ + interiorWriterInit(1, pStartingTerm, nStartingTerm, iBlockid, + &pWriter->parentWriter); + pWriter->has_parent = 1; + } + + /* Track the span of this segment's leaf nodes. 
*/ + if( pWriter->iEndBlockid==0 ){ + pWriter->iEndBlockid = pWriter->iStartBlockid = iBlockid; + }else{ + pWriter->iEndBlockid++; + assert( iBlockid==pWriter->iEndBlockid ); + } + + return SQLITE_OK; +} +static int leafWriterFlush(fulltext_vtab *v, LeafWriter *pWriter){ + int rc = leafWriterInternalFlush(v, pWriter, 0, pWriter->data.nData); + if( rc!=SQLITE_OK ) return rc; + + /* Re-initialize the output buffer. */ + dataBufferReset(&pWriter->data); + + return SQLITE_OK; +} + +/* Fetch the root info for the segment. If the entire leaf fits +** within ROOT_MAX, then it will be returned directly, otherwise it +** will be flushed and the root info will be returned from the +** interior node. *piEndBlockid is set to the blockid of the last +** interior or leaf node written to disk (0 if none are written at +** all). +*/ +static int leafWriterRootInfo(fulltext_vtab *v, LeafWriter *pWriter, + char **ppRootInfo, int *pnRootInfo, + sqlite_int64 *piEndBlockid){ + /* we can fit the segment entirely inline */ + if( !pWriter->has_parent && pWriter->data.nDatadata.pData; + *pnRootInfo = pWriter->data.nData; + *piEndBlockid = 0; + return SQLITE_OK; + } + + /* Flush remaining leaf data. */ + if( pWriter->data.nData>0 ){ + int rc = leafWriterFlush(v, pWriter); + if( rc!=SQLITE_OK ) return rc; + } + + /* We must have flushed a leaf at some point. */ + assert( pWriter->has_parent ); + + /* Tenatively set the end leaf blockid as the end blockid. If the + ** interior node can be returned inline, this will be the final + ** blockid, otherwise it will be overwritten by + ** interiorWriterRootInfo(). + */ + *piEndBlockid = pWriter->iEndBlockid; + + return interiorWriterRootInfo(v, &pWriter->parentWriter, + ppRootInfo, pnRootInfo, piEndBlockid); +} + +/* Collect the rootInfo data and store it into the segment directory. +** This has the effect of flushing the segment's leaf data to +** %_segments, and also flushing any interior nodes to %_segments. +*/ +static int leafWriterFinalize(fulltext_vtab *v, LeafWriter *pWriter){ + sqlite_int64 iEndBlockid; + char *pRootInfo; + int rc, nRootInfo; + + rc = leafWriterRootInfo(v, pWriter, &pRootInfo, &nRootInfo, &iEndBlockid); + if( rc!=SQLITE_OK ) return rc; + + /* Don't bother storing an entirely empty segment. */ + if( iEndBlockid==0 && nRootInfo==0 ) return SQLITE_OK; + + return segdir_set(v, pWriter->iLevel, pWriter->idx, + pWriter->iStartBlockid, pWriter->iEndBlockid, + iEndBlockid, pRootInfo, nRootInfo); +} + +static void leafWriterDestroy(LeafWriter *pWriter){ + if( pWriter->has_parent ) interiorWriterDestroy(&pWriter->parentWriter); + dataBufferDestroy(&pWriter->term); + dataBufferDestroy(&pWriter->data); +} + +/* Encode a term into the leafWriter, delta-encoding as appropriate. +** Returns the length of the new term which distinguishes it from the +** previous term, which can be used to set nTermDistinct when a node +** boundary is crossed. +*/ +static int leafWriterEncodeTerm(LeafWriter *pWriter, + const char *pTerm, int nTerm){ + char c[VARINT_MAX+VARINT_MAX]; + int n, nPrefix = 0; + + assert( nTerm>0 ); + while( nPrefixterm.nData && + pTerm[nPrefix]==pWriter->term.pData[nPrefix] ){ + nPrefix++; + /* Failing this implies that the terms weren't in order. 
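+    ** Terms reach this writer in sorted order, so the new term can never be
+    ** exhausted while it still matches the previous term; that is what the
+    ** assert below checks.  For example, going from a previous term "link"
+    ** to "linoleum", the loop stops with nPrefix==3 and the entry is later
+    ** written as varint(3), varint(5), "oleum".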
*/ + assert( nPrefixdata.nData==0 ){ + /* Encode the node header and leading term as: + ** varint(0) + ** varint(nTerm) + ** char pTerm[nTerm] + */ + n = putVarint(c, '\0'); + n += putVarint(c+n, nTerm); + dataBufferAppend2(&pWriter->data, c, n, pTerm, nTerm); + }else{ + /* Delta-encode the term as: + ** varint(nPrefix) + ** varint(nSuffix) + ** char pTermSuffix[nSuffix] + */ + n = putVarint(c, nPrefix); + n += putVarint(c+n, nTerm-nPrefix); + dataBufferAppend2(&pWriter->data, c, n, pTerm+nPrefix, nTerm-nPrefix); + } + dataBufferReplace(&pWriter->term, pTerm, nTerm); + + return nPrefix+1; +} + +/* Used to avoid a memmove when a large amount of doclist data is in +** the buffer. This constructs a node and term header before +** iDoclistData and flushes the resulting complete node using +** leafWriterInternalFlush(). +*/ +static int leafWriterInlineFlush(fulltext_vtab *v, LeafWriter *pWriter, + const char *pTerm, int nTerm, + int iDoclistData){ + char c[VARINT_MAX+VARINT_MAX]; + int iData, n = putVarint(c, 0); + n += putVarint(c+n, nTerm); + + /* There should always be room for the header. Even if pTerm shared + ** a substantial prefix with the previous term, the entire prefix + ** could be constructed from earlier data in the doclist, so there + ** should be room. + */ + assert( iDoclistData>=n+nTerm ); + + iData = iDoclistData-(n+nTerm); + memcpy(pWriter->data.pData+iData, c, n); + memcpy(pWriter->data.pData+iData+n, pTerm, nTerm); + + return leafWriterInternalFlush(v, pWriter, iData, pWriter->data.nData-iData); +} + +/* Push pTerm[nTerm] along with the doclist data to the leaf layer of +** %_segments. +*/ +static int leafWriterStepMerge(fulltext_vtab *v, LeafWriter *pWriter, + const char *pTerm, int nTerm, + DLReader *pReaders, int nReaders){ + char c[VARINT_MAX+VARINT_MAX]; + int iTermData = pWriter->data.nData, iDoclistData; + int i, nData, n, nActualData, nActual, rc, nTermDistinct; + + ASSERT_VALID_LEAF_NODE(pWriter->data.pData, pWriter->data.nData); + nTermDistinct = leafWriterEncodeTerm(pWriter, pTerm, nTerm); + + /* Remember nTermDistinct if opening a new node. */ + if( iTermData==0 ) pWriter->nTermDistinct = nTermDistinct; + + iDoclistData = pWriter->data.nData; + + /* Estimate the length of the merged doclist so we can leave space + ** to encode it. + */ + for(i=0, nData=0; idata, c, n); + + docListMerge(&pWriter->data, pReaders, nReaders); + ASSERT_VALID_DOCLIST(DL_DEFAULT, + pWriter->data.pData+iDoclistData+n, + pWriter->data.nData-iDoclistData-n, NULL); + + /* The actual amount of doclist data at this point could be smaller + ** than the length we encoded. Additionally, the space required to + ** encode this length could be smaller. For small doclists, this is + ** not a big deal, we can just use memmove() to adjust things. + */ + nActualData = pWriter->data.nData-(iDoclistData+n); + nActual = putVarint(c, nActualData); + assert( nActualData<=nData ); + assert( nActual<=n ); + + /* If the new doclist is big enough for force a standalone leaf + ** node, we can immediately flush it inline without doing the + ** memmove(). + */ + /* TODO(shess) This test matches leafWriterStep(), which does this + ** test before it knows the cost to varint-encode the term and + ** doclist lengths. At some point, change to + ** pWriter->data.nData-iTermData>STANDALONE_MIN. + */ + if( nTerm+nActualData>STANDALONE_MIN ){ + /* Push leaf node from before this term. 
*/ + if( iTermData>0 ){ + rc = leafWriterInternalFlush(v, pWriter, 0, iTermData); + if( rc!=SQLITE_OK ) return rc; + + pWriter->nTermDistinct = nTermDistinct; + } + + /* Fix the encoded doclist length. */ + iDoclistData += n - nActual; + memcpy(pWriter->data.pData+iDoclistData, c, nActual); + + /* Push the standalone leaf node. */ + rc = leafWriterInlineFlush(v, pWriter, pTerm, nTerm, iDoclistData); + if( rc!=SQLITE_OK ) return rc; + + /* Leave the node empty. */ + dataBufferReset(&pWriter->data); + + return rc; + } + + /* At this point, we know that the doclist was small, so do the + ** memmove if indicated. + */ + if( nActualdata.pData+iDoclistData+nActual, + pWriter->data.pData+iDoclistData+n, + pWriter->data.nData-(iDoclistData+n)); + pWriter->data.nData -= n-nActual; + } + + /* Replace written length with actual length. */ + memcpy(pWriter->data.pData+iDoclistData, c, nActual); + + /* If the node is too large, break things up. */ + /* TODO(shess) This test matches leafWriterStep(), which does this + ** test before it knows the cost to varint-encode the term and + ** doclist lengths. At some point, change to + ** pWriter->data.nData>LEAF_MAX. + */ + if( iTermData+nTerm+nActualData>LEAF_MAX ){ + /* Flush out the leading data as a node */ + rc = leafWriterInternalFlush(v, pWriter, 0, iTermData); + if( rc!=SQLITE_OK ) return rc; + + pWriter->nTermDistinct = nTermDistinct; + + /* Rebuild header using the current term */ + n = putVarint(pWriter->data.pData, 0); + n += putVarint(pWriter->data.pData+n, nTerm); + memcpy(pWriter->data.pData+n, pTerm, nTerm); + n += nTerm; + + /* There should always be room, because the previous encoding + ** included all data necessary to construct the term. + */ + assert( ndata.nData-iDoclistDatadata.pData+n, + pWriter->data.pData+iDoclistData, + pWriter->data.nData-iDoclistData); + pWriter->data.nData -= iDoclistData-n; + } + ASSERT_VALID_LEAF_NODE(pWriter->data.pData, pWriter->data.nData); + + return SQLITE_OK; +} + +/* Push pTerm[nTerm] along with the doclist data to the leaf layer of +** %_segments. +*/ +/* TODO(shess) Revise writeZeroSegment() so that doclists are +** constructed directly in pWriter->data. +*/ +static int leafWriterStep(fulltext_vtab *v, LeafWriter *pWriter, + const char *pTerm, int nTerm, + const char *pData, int nData){ + int rc; + DLReader reader; + + dlrInit(&reader, DL_DEFAULT, pData, nData); + rc = leafWriterStepMerge(v, pWriter, pTerm, nTerm, &reader, 1); + dlrDestroy(&reader); + + return rc; +} + + +/****************************************************************/ +/* LeafReader is used to iterate over an individual leaf node. */ +typedef struct LeafReader { + DataBuffer term; /* copy of current term. */ + + const char *pData; /* data for current term. */ + int nData; +} LeafReader; + +static void leafReaderDestroy(LeafReader *pReader){ + dataBufferDestroy(&pReader->term); + SCRAMBLE(pReader); +} + +static int leafReaderAtEnd(LeafReader *pReader){ + return pReader->nData<=0; +} + +/* Access the current term. */ +static int leafReaderTermBytes(LeafReader *pReader){ + return pReader->term.nData; +} +static const char *leafReaderTerm(LeafReader *pReader){ + assert( pReader->term.nData>0 ); + return pReader->term.pData; +} + +/* Access the doclist data for the current term. 
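+** Each leaf entry stores the (possibly delta-encoded) term followed by
+** varint(nData) and then the nData bytes of doclist data; the getVarint32()
+** calls below decode that length.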
*/ +static int leafReaderDataBytes(LeafReader *pReader){ + int nData; + assert( pReader->term.nData>0 ); + getVarint32(pReader->pData, &nData); + return nData; +} +static const char *leafReaderData(LeafReader *pReader){ + int n, nData; + assert( pReader->term.nData>0 ); + n = getVarint32(pReader->pData, &nData); + return pReader->pData+n; +} + +static void leafReaderInit(const char *pData, int nData, + LeafReader *pReader){ + int nTerm, n; + + assert( nData>0 ); + assert( pData[0]=='\0' ); + + CLEAR(pReader); + + /* Read the first term, skipping the header byte. */ + n = getVarint32(pData+1, &nTerm); + dataBufferInit(&pReader->term, nTerm); + dataBufferReplace(&pReader->term, pData+1+n, nTerm); + + /* Position after the first term. */ + assert( 1+n+nTermpData = pData+1+n+nTerm; + pReader->nData = nData-1-n-nTerm; +} + +/* Step the reader forward to the next term. */ +static void leafReaderStep(LeafReader *pReader){ + int n, nData, nPrefix, nSuffix; + assert( !leafReaderAtEnd(pReader) ); + + /* Skip previous entry's data block. */ + n = getVarint32(pReader->pData, &nData); + assert( n+nData<=pReader->nData ); + pReader->pData += n+nData; + pReader->nData -= n+nData; + + if( !leafReaderAtEnd(pReader) ){ + /* Construct the new term using a prefix from the old term plus a + ** suffix from the leaf data. + */ + n = getVarint32(pReader->pData, &nPrefix); + n += getVarint32(pReader->pData+n, &nSuffix); + assert( n+nSuffixnData ); + pReader->term.nData = nPrefix; + dataBufferAppend(&pReader->term, pReader->pData+n, nSuffix); + + pReader->pData += n+nSuffix; + pReader->nData -= n+nSuffix; + } +} + +/* strcmp-style comparison of pReader's current term against pTerm. +** If isPrefix, equality means equal through nTerm bytes. +*/ +static int leafReaderTermCmp(LeafReader *pReader, + const char *pTerm, int nTerm, int isPrefix){ + int c, n = pReader->term.nDataterm.nData : nTerm; + if( n==0 ){ + if( pReader->term.nData>0 ) return -1; + if(nTerm>0 ) return 1; + return 0; + } + + c = memcmp(pReader->term.pData, pTerm, n); + if( c!=0 ) return c; + if( isPrefix && n==nTerm ) return 0; + return pReader->term.nData - nTerm; +} + + +/****************************************************************/ +/* LeavesReader wraps LeafReader to allow iterating over the entire +** leaf layer of the tree. +*/ +typedef struct LeavesReader { + int idx; /* Index within the segment. */ + + sqlite3_stmt *pStmt; /* Statement we're streaming leaves from. */ + int eof; /* we've seen SQLITE_DONE from pStmt. */ + + LeafReader leafReader; /* reader for the current leaf. */ + DataBuffer rootData; /* root data for inline. */ +} LeavesReader; + +/* Access the current term. */ +static int leavesReaderTermBytes(LeavesReader *pReader){ + assert( !pReader->eof ); + return leafReaderTermBytes(&pReader->leafReader); +} +static const char *leavesReaderTerm(LeavesReader *pReader){ + assert( !pReader->eof ); + return leafReaderTerm(&pReader->leafReader); +} + +/* Access the doclist data for the current term. */ +static int leavesReaderDataBytes(LeavesReader *pReader){ + assert( !pReader->eof ); + return leafReaderDataBytes(&pReader->leafReader); +} +static const char *leavesReaderData(LeavesReader *pReader){ + assert( !pReader->eof ); + return leafReaderData(&pReader->leafReader); +} + +static int leavesReaderAtEnd(LeavesReader *pReader){ + return pReader->eof; +} + +/* loadSegmentLeaves() may not read all the way to SQLITE_DONE, thus +** leaving the statement handle open, which locks the table. 
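+**
+** A minimal sketch of the reset idiom involved (pStmt and bDone are
+** hypothetical):
+**
+**     while( sqlite3_step(pStmt)==SQLITE_ROW && !bDone ){
+**       ... consume the row ...
+**     }
+**     sqlite3_reset(pStmt);     (releases the lock even if we stop early)
+**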
+*/ +/* TODO(shess) This "solution" is not satisfactory. Really, there +** should be check-in function for all statement handles which +** arranges to call sqlite3_reset(). This most likely will require +** modification to control flow all over the place, though, so for now +** just punt. +** +** Note the the current system assumes that segment merges will run to +** completion, which is why this particular probably hasn't arisen in +** this case. Probably a brittle assumption. +*/ +static int leavesReaderReset(LeavesReader *pReader){ + return sqlite3_reset(pReader->pStmt); +} + +static void leavesReaderDestroy(LeavesReader *pReader){ + leafReaderDestroy(&pReader->leafReader); + dataBufferDestroy(&pReader->rootData); + SCRAMBLE(pReader); +} + +/* Initialize pReader with the given root data (if iStartBlockid==0 +** the leaf data was entirely contained in the root), or from the +** stream of blocks between iStartBlockid and iEndBlockid, inclusive. +*/ +static int leavesReaderInit(fulltext_vtab *v, + int idx, + sqlite_int64 iStartBlockid, + sqlite_int64 iEndBlockid, + const char *pRootData, int nRootData, + LeavesReader *pReader){ + CLEAR(pReader); + pReader->idx = idx; + + dataBufferInit(&pReader->rootData, 0); + if( iStartBlockid==0 ){ + /* Entire leaf level fit in root data. */ + dataBufferReplace(&pReader->rootData, pRootData, nRootData); + leafReaderInit(pReader->rootData.pData, pReader->rootData.nData, + &pReader->leafReader); + }else{ + sqlite3_stmt *s; + int rc = sql_get_leaf_statement(v, idx, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iStartBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 2, iEndBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ){ + pReader->eof = 1; + return SQLITE_OK; + } + if( rc!=SQLITE_ROW ) return rc; + + pReader->pStmt = s; + leafReaderInit(sqlite3_column_blob(pReader->pStmt, 0), + sqlite3_column_bytes(pReader->pStmt, 0), + &pReader->leafReader); + } + return SQLITE_OK; +} + +/* Step the current leaf forward to the next term. If we reach the +** end of the current leaf, step forward to the next leaf block. +*/ +static int leavesReaderStep(fulltext_vtab *v, LeavesReader *pReader){ + assert( !leavesReaderAtEnd(pReader) ); + leafReaderStep(&pReader->leafReader); + + if( leafReaderAtEnd(&pReader->leafReader) ){ + int rc; + if( pReader->rootData.pData ){ + pReader->eof = 1; + return SQLITE_OK; + } + rc = sqlite3_step(pReader->pStmt); + if( rc!=SQLITE_ROW ){ + pReader->eof = 1; + return rc==SQLITE_DONE ? SQLITE_OK : rc; + } + leafReaderDestroy(&pReader->leafReader); + leafReaderInit(sqlite3_column_blob(pReader->pStmt, 0), + sqlite3_column_bytes(pReader->pStmt, 0), + &pReader->leafReader); + } + return SQLITE_OK; +} + +/* Order LeavesReaders by their term, ignoring idx. Readers at eof +** always sort to the end. +*/ +static int leavesReaderTermCmp(LeavesReader *lr1, LeavesReader *lr2){ + if( leavesReaderAtEnd(lr1) ){ + if( leavesReaderAtEnd(lr2) ) return 0; + return 1; + } + if( leavesReaderAtEnd(lr2) ) return -1; + + return leafReaderTermCmp(&lr1->leafReader, + leavesReaderTerm(lr2), leavesReaderTermBytes(lr2), + 0); +} + +/* Similar to leavesReaderTermCmp(), with additional ordering by idx +** so that older segments sort before newer segments. +*/ +static int leavesReaderCmp(LeavesReader *lr1, LeavesReader *lr2){ + int c = leavesReaderTermCmp(lr1, lr2); + if( c!=0 ) return c; + return lr1->idx-lr2->idx; +} + +/* Assume that pLr[1]..pLr[nLr] are sorted. 
Bubble pLr[0] into its +** sorted position. +*/ +static void leavesReaderReorder(LeavesReader *pLr, int nLr){ + while( nLr>1 && leavesReaderCmp(pLr, pLr+1)>0 ){ + LeavesReader tmp = pLr[0]; + pLr[0] = pLr[1]; + pLr[1] = tmp; + nLr--; + pLr++; + } +} + +/* Initializes pReaders with the segments from level iLevel, returning +** the number of segments in *piReaders. Leaves pReaders in sorted +** order. +*/ +static int leavesReadersInit(fulltext_vtab *v, int iLevel, + LeavesReader *pReaders, int *piReaders){ + sqlite3_stmt *s; + int i, rc = sql_get_statement(v, SEGDIR_SELECT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + i = 0; + while( (rc = sqlite3_step(s))==SQLITE_ROW ){ + sqlite_int64 iStart = sqlite3_column_int64(s, 0); + sqlite_int64 iEnd = sqlite3_column_int64(s, 1); + const char *pRootData = sqlite3_column_blob(s, 2); + int nRootData = sqlite3_column_bytes(s, 2); + + assert( i0 ){ + leavesReaderDestroy(&pReaders[i]); + } + return rc; + } + + *piReaders = i; + + /* Leave our results sorted by term, then age. */ + while( i-- ){ + leavesReaderReorder(pReaders+i, *piReaders-i); + } + return SQLITE_OK; +} + +/* Merge doclists from pReaders[nReaders] into a single doclist, which +** is written to pWriter. Assumes pReaders is ordered oldest to +** newest. +*/ +/* TODO(shess) Consider putting this inline in segmentMerge(). */ +static int leavesReadersMerge(fulltext_vtab *v, + LeavesReader *pReaders, int nReaders, + LeafWriter *pWriter){ + DLReader dlReaders[MERGE_COUNT]; + const char *pTerm = leavesReaderTerm(pReaders); + int i, nTerm = leavesReaderTermBytes(pReaders); + + assert( nReaders<=MERGE_COUNT ); + + for(i=0; i0 ){ + rc = leavesReaderStep(v, lrs+i); + if( rc!=SQLITE_OK ) goto err; + + /* Reorder by term, then by age. */ + leavesReaderReorder(lrs+i, MERGE_COUNT-i); + } + } + + for(i=0; i0 ); + + /* Process while the prefix matches. */ + while( !leavesReaderAtEnd(pReader) ){ + /* TODO(shess) Really want leavesReaderTermCmp(), but that name is + ** already taken to compare the terms of two LeavesReaders. Think + ** on a better name. [Meanwhile, break encapsulation rather than + ** use a confusing name.] + */ + int rc; + int c = leafReaderTermCmp(&pReader->leafReader, pTerm, nTerm, isPrefix); + if( c==0 ){ + const char *pData = leavesReaderData(pReader); + int nData = leavesReaderDataBytes(pReader); + if( out->nData==0 ){ + dataBufferReplace(out, pData, nData); + }else{ + DataBuffer result; + dataBufferInit(&result, out->nData+nData); + docListUnion(out->pData, out->nData, pData, nData, &result); + dataBufferDestroy(out); + *out = result; + /* TODO(shess) Rather than destroy out, we could retain it for + ** later reuse. + */ + } + } + if( c>0 ) break; /* Past any possible matches. */ + + rc = leavesReaderStep(v, pReader); + if( rc!=SQLITE_OK ) return rc; + } + return SQLITE_OK; +} + +/* Call loadSegmentLeavesInt() with pData/nData as input. 
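+**
+** For a prefix query (isPrefix!=0) the loop in loadSegmentLeavesInt()
+** above ORs together the doclist of every matching term via
+** docListUnion().  Illustrative example with made-up docids for the
+** prefix "run":
+**
+**     "run"      -> docids {3, 9}
+**     "runner"   -> docids {4}
+**     "running"  -> docids {3, 12}
+**     *out       -> docids {3, 4, 9, 12}
+**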
*/ +static int loadSegmentLeaf(fulltext_vtab *v, const char *pData, int nData, + const char *pTerm, int nTerm, int isPrefix, + DataBuffer *out){ + LeavesReader reader; + int rc; + + assert( nData>1 ); + assert( *pData=='\0' ); + rc = leavesReaderInit(v, 0, 0, 0, pData, nData, &reader); + if( rc!=SQLITE_OK ) return rc; + + rc = loadSegmentLeavesInt(v, &reader, pTerm, nTerm, isPrefix, out); + leavesReaderReset(&reader); + leavesReaderDestroy(&reader); + return rc; +} + +/* Call loadSegmentLeavesInt() with the leaf nodes from iStartLeaf to +** iEndLeaf (inclusive) as input, and merge the resulting doclist into +** out. +*/ +static int loadSegmentLeaves(fulltext_vtab *v, + sqlite_int64 iStartLeaf, sqlite_int64 iEndLeaf, + const char *pTerm, int nTerm, int isPrefix, + DataBuffer *out){ + int rc; + LeavesReader reader; + + assert( iStartLeaf<=iEndLeaf ); + rc = leavesReaderInit(v, 0, iStartLeaf, iEndLeaf, NULL, 0, &reader); + if( rc!=SQLITE_OK ) return rc; + + rc = loadSegmentLeavesInt(v, &reader, pTerm, nTerm, isPrefix, out); + leavesReaderReset(&reader); + leavesReaderDestroy(&reader); + return rc; +} + +/* Taking pData/nData as an interior node, find the sequence of child +** nodes which could include pTerm/nTerm/isPrefix. Note that the +** interior node terms logically come between the blocks, so there is +** one more blockid than there are terms (that block contains terms >= +** the last interior-node term). +*/ +/* TODO(shess) The calling code may already know that the end child is +** not worth calculating, because the end may be in a later sibling +** node. Consider whether breaking symmetry is worthwhile. I suspect +** it's not worthwhile. +*/ +static void getChildrenContaining(const char *pData, int nData, + const char *pTerm, int nTerm, int isPrefix, + sqlite_int64 *piStartChild, + sqlite_int64 *piEndChild){ + InteriorReader reader; + + assert( nData>1 ); + assert( *pData!='\0' ); + interiorReaderInit(pData, nData, &reader); + + /* Scan for the first child which could contain pTerm/nTerm. */ + while( !interiorReaderAtEnd(&reader) ){ + if( interiorReaderTermCmp(&reader, pTerm, nTerm, 0)>0 ) break; + interiorReaderStep(&reader); + } + *piStartChild = interiorReaderCurrentBlockid(&reader); + + /* Keep scanning to find a term greater than our term, using prefix + ** comparison if indicated. If isPrefix is false, this will be the + ** same blockid as the starting block. + */ + while( !interiorReaderAtEnd(&reader) ){ + if( interiorReaderTermCmp(&reader, pTerm, nTerm, isPrefix)>0 ) break; + interiorReaderStep(&reader); + } + *piEndChild = interiorReaderCurrentBlockid(&reader); + + interiorReaderDestroy(&reader); + + /* Children must ascend, and if !prefix, both must be the same. */ + assert( *piEndChild>=*piStartChild ); + assert( isPrefix || *piStartChild==*piEndChild ); +} + +/* Read block at iBlockid and pass it with other params to +** getChildrenContaining(). +*/ +static int loadAndGetChildrenContaining( + fulltext_vtab *v, + sqlite_int64 iBlockid, + const char *pTerm, int nTerm, int isPrefix, + sqlite_int64 *piStartChild, sqlite_int64 *piEndChild +){ + sqlite3_stmt *s = NULL; + int rc; + + assert( iBlockid!=0 ); + assert( pTerm!=NULL ); + assert( nTerm!=0 ); /* TODO(shess) Why not allow this? 
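+**
+** Illustration (not from the original sources) of the interior-node
+** convention getChildrenContaining() relies on: with child blockids
+** B0..B3 and stored terms T1..T3, the terms sit between the children,
+**
+**     B0  T1  B1  T2  B2  T3  B3
+**
+** so a probe term t is routed to B0 if t < T1, to B1 if T1 <= t < T2,
+** and to B3 if t >= T3; a prefix probe may span several consecutive
+** children, which is why a start and an end blockid are both computed.
+**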
*/ + assert( piStartChild!=NULL ); + assert( piEndChild!=NULL ); + + rc = sql_get_statement(v, BLOCK_SELECT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ) return SQLITE_ERROR; + if( rc!=SQLITE_ROW ) return rc; + + getChildrenContaining(sqlite3_column_blob(s, 0), sqlite3_column_bytes(s, 0), + pTerm, nTerm, isPrefix, piStartChild, piEndChild); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain + * locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + if( rc!=SQLITE_DONE ) return rc; + + return SQLITE_OK; +} + +/* Traverse the tree represented by pData[nData] looking for +** pTerm[nTerm], placing its doclist into *out. This is internal to +** loadSegment() to make error-handling cleaner. +*/ +static int loadSegmentInt(fulltext_vtab *v, const char *pData, int nData, + sqlite_int64 iLeavesEnd, + const char *pTerm, int nTerm, int isPrefix, + DataBuffer *out){ + /* Special case where root is a leaf. */ + if( *pData=='\0' ){ + return loadSegmentLeaf(v, pData, nData, pTerm, nTerm, isPrefix, out); + }else{ + int rc; + sqlite_int64 iStartChild, iEndChild; + + /* Process pData as an interior node, then loop down the tree + ** until we find the set of leaf nodes to scan for the term. + */ + getChildrenContaining(pData, nData, pTerm, nTerm, isPrefix, + &iStartChild, &iEndChild); + while( iStartChild>iLeavesEnd ){ + sqlite_int64 iNextStart, iNextEnd; + rc = loadAndGetChildrenContaining(v, iStartChild, pTerm, nTerm, isPrefix, + &iNextStart, &iNextEnd); + if( rc!=SQLITE_OK ) return rc; + + /* If we've branched, follow the end branch, too. */ + if( iStartChild!=iEndChild ){ + sqlite_int64 iDummy; + rc = loadAndGetChildrenContaining(v, iEndChild, pTerm, nTerm, isPrefix, + &iDummy, &iNextEnd); + if( rc!=SQLITE_OK ) return rc; + } + + assert( iNextStart<=iNextEnd ); + iStartChild = iNextStart; + iEndChild = iNextEnd; + } + assert( iStartChild<=iLeavesEnd ); + assert( iEndChild<=iLeavesEnd ); + + /* Scan through the leaf segments for doclists. */ + return loadSegmentLeaves(v, iStartChild, iEndChild, + pTerm, nTerm, isPrefix, out); + } +} + +/* Call loadSegmentInt() to collect the doclist for pTerm/nTerm, then +** merge its doclist over *out (any duplicate doclists read from the +** segment rooted at pData will overwrite those in *out). +*/ +/* TODO(shess) Consider changing this to determine the depth of the +** leaves using either the first characters of interior nodes (when +** ==1, we're one level above the leaves), or the first character of +** the root (which will describe the height of the tree directly). +** Either feels somewhat tricky to me. +*/ +/* TODO(shess) The current merge is likely to be slow for large +** doclists (though it should process from newest/smallest to +** oldest/largest, so it may not be that bad). It might be useful to +** modify things to allow for N-way merging. This could either be +** within a segment, with pairwise merges across segments, or across +** all segments at once. +*/ +static int loadSegment(fulltext_vtab *v, const char *pData, int nData, + sqlite_int64 iLeavesEnd, + const char *pTerm, int nTerm, int isPrefix, + DataBuffer *out){ + DataBuffer result; + int rc; + + assert( nData>1 ); + + /* This code should never be called with buffered updates. 
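+**
+** (Purely illustrative:) the merge below places the previously
+** accumulated *out first and this segment's result second, so entries
+** from this (newer) segment win.  E.g. if both carry an entry for
+** docid 7, the docid 7 entry from this segment replaces the older one
+** in the merged output.
+**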
*/ + assert( v->nPendingData<0 ); + + dataBufferInit(&result, 0); + rc = loadSegmentInt(v, pData, nData, iLeavesEnd, + pTerm, nTerm, isPrefix, &result); + if( rc==SQLITE_OK && result.nData>0 ){ + if( out->nData==0 ){ + DataBuffer tmp = *out; + *out = result; + result = tmp; + }else{ + DataBuffer merged; + DLReader readers[2]; + + dlrInit(&readers[0], DL_DEFAULT, out->pData, out->nData); + dlrInit(&readers[1], DL_DEFAULT, result.pData, result.nData); + dataBufferInit(&merged, out->nData+result.nData); + docListMerge(&merged, readers, 2); + dataBufferDestroy(out); + *out = merged; + dlrDestroy(&readers[0]); + dlrDestroy(&readers[1]); + } + } + dataBufferDestroy(&result); + return rc; +} + +/* Scan the database and merge together the posting lists for the term +** into *out. +*/ +static int termSelect(fulltext_vtab *v, int iColumn, + const char *pTerm, int nTerm, int isPrefix, + DocListType iType, DataBuffer *out){ + DataBuffer doclist; + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_SELECT_ALL_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + /* This code should never be called with buffered updates. */ + assert( v->nPendingData<0 ); + + dataBufferInit(&doclist, 0); + + /* Traverse the segments from oldest to newest so that newer doclist + ** elements for given docids overwrite older elements. + */ + while( (rc = sqlite3_step(s))==SQLITE_ROW ){ + const char *pData = sqlite3_column_blob(s, 0); + const int nData = sqlite3_column_bytes(s, 0); + const sqlite_int64 iLeavesEnd = sqlite3_column_int64(s, 1); + rc = loadSegment(v, pData, nData, iLeavesEnd, pTerm, nTerm, isPrefix, + &doclist); + if( rc!=SQLITE_OK ) goto err; + } + if( rc==SQLITE_DONE ){ + if( doclist.nData!=0 ){ + /* TODO(shess) The old term_select_all() code applied the column + ** restrict as we merged segments, leading to smaller buffers. + ** This is probably worthwhile to bring back, once the new storage + ** system is checked in. + */ + if( iColumn==v->nColumn) iColumn = -1; + docListTrim(DL_DEFAULT, doclist.pData, doclist.nData, + iColumn, iType, out); + } + rc = SQLITE_OK; + } + + err: + dataBufferDestroy(&doclist); + return rc; +} + +/****************************************************************/ +/* Used to hold hashtable data for sorting. */ +typedef struct TermData { + const char *pTerm; + int nTerm; + DLCollector *pCollector; +} TermData; + +/* Orders TermData elements in strcmp fashion ( <0 for less-than, 0 +** for equal, >0 for greater-than). +*/ +static int termDataCmp(const void *av, const void *bv){ + const TermData *a = (const TermData *)av; + const TermData *b = (const TermData *)bv; + int n = a->nTermnTerm ? a->nTerm : b->nTerm; + int c = memcmp(a->pTerm, b->pTerm, n); + if( c!=0 ) return c; + return a->nTerm-b->nTerm; +} + +/* Order pTerms data by term, then write a new level 0 segment using +** LeafWriter. +*/ +static int writeZeroSegment(fulltext_vtab *v, fts3Hash *pTerms){ + fts3HashElem *e; + int idx, rc, i, n; + TermData *pData; + LeafWriter writer; + DataBuffer dl; + + /* Determine the next index at level 0, merging as necessary. */ + rc = segdirNextIndex(v, 0, &idx); + if( rc!=SQLITE_OK ) return rc; + + n = fts3HashCount(pTerms); + pData = malloc(n*sizeof(TermData)); + + for(i = 0, e = fts3HashFirst(pTerms); e; i++, e = fts3HashNext(e)){ + assert( i1 ) qsort(pData, n, sizeof(*pData), termDataCmp); + + /* TODO(shess) Refactor so that we can write directly to the segment + ** DataBuffer, as happens for segment merges. 
+ */ + leafWriterInit(0, idx, &writer); + dataBufferInit(&dl, 0); + for(i=0; inPendingData>=0 ){ + fts3HashElem *e; + for(e=fts3HashFirst(&v->pendingTerms); e; e=fts3HashNext(e)){ + dlcDelete(fts3HashData(e)); + } + fts3HashClear(&v->pendingTerms); + v->nPendingData = -1; + } + return SQLITE_OK; +} + +/* If pendingTerms has data, flush it to a level-zero segment, and +** free it. +*/ +static int flushPendingTerms(fulltext_vtab *v){ + if( v->nPendingData>=0 ){ + int rc = writeZeroSegment(v, &v->pendingTerms); + if( rc==SQLITE_OK ) clearPendingTerms(v); + return rc; + } + return SQLITE_OK; +} + +/* If pendingTerms is "too big", or docid is out of order, flush it. +** Regardless, be certain that pendingTerms is initialized for use. +*/ +static int initPendingTerms(fulltext_vtab *v, sqlite_int64 iDocid){ + /* TODO(shess) Explore whether partially flushing the buffer on + ** forced-flush would provide better performance. I suspect that if + ** we ordered the doclists by size and flushed the largest until the + ** buffer was half empty, that would let the less frequent terms + ** generate longer doclists. + */ + if( iDocid<=v->iPrevDocid || v->nPendingData>kPendingThreshold ){ + int rc = flushPendingTerms(v); + if( rc!=SQLITE_OK ) return rc; + } + if( v->nPendingData<0 ){ + fts3HashInit(&v->pendingTerms, FTS3_HASH_STRING, 1); + v->nPendingData = 0; + } + v->iPrevDocid = iDocid; + return SQLITE_OK; +} + +/* This function implements the xUpdate callback; it's the top-level entry + * point for inserting, deleting or updating a row in a full-text table. */ +static int fulltextUpdate(sqlite3_vtab *pVtab, int nArg, sqlite3_value **ppArg, + sqlite_int64 *pRowid){ + fulltext_vtab *v = (fulltext_vtab *) pVtab; + int rc; + + TRACE(("FTS3 Update %p\n", pVtab)); + + if( nArg<2 ){ + rc = index_delete(v, sqlite3_value_int64(ppArg[0])); + } else if( sqlite3_value_type(ppArg[0]) != SQLITE_NULL ){ + /* An update: + * ppArg[0] = old rowid + * ppArg[1] = new rowid + * ppArg[2..2+v->nColumn-1] = values + * ppArg[2+v->nColumn] = value for magic column (we ignore this) + * ppArg[2+v->nColumn+1] = value for docid + */ + sqlite_int64 rowid = sqlite3_value_int64(ppArg[0]); + if( sqlite3_value_type(ppArg[1]) != SQLITE_INTEGER || + sqlite3_value_int64(ppArg[1]) != rowid ){ + rc = SQLITE_ERROR; /* we don't allow changing the rowid */ + }else if( sqlite3_value_type(ppArg[2+v->nColumn+1]) != SQLITE_INTEGER || + sqlite3_value_int64(ppArg[2+v->nColumn+1]) != rowid ){ + rc = SQLITE_ERROR; /* we don't allow changing the docid */ + }else{ + assert( nArg==2+v->nColumn+2); + rc = index_update(v, rowid, &ppArg[2]); + } + } else { + /* An insert: + * ppArg[1] = requested rowid + * ppArg[2..2+v->nColumn-1] = values + * ppArg[2+v->nColumn] = value for magic column (we ignore this) + * ppArg[2+v->nColumn+1] = value for docid + */ + sqlite3_value *pRequestDocid = ppArg[2+v->nColumn+1]; + assert( nArg==2+v->nColumn+2); + if( SQLITE_NULL != sqlite3_value_type(pRequestDocid) && + SQLITE_NULL != sqlite3_value_type(ppArg[1]) ){ + /* TODO(shess) Consider allowing this to work if the values are + ** identical. I'm inclined to discourage that usage, though, + ** given that both rowid and docid are special columns. Better + ** would be to define one or the other as the default winner, + ** but should it be fts3-centric (docid) or SQLite-centric + ** (rowid)? 
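+**
+** For reference, the layout described above for a table with
+** nColumn==2 (illustrative; nArg is then 6):
+**
+**     ppArg[0] = NULL            NULL marks an insert
+**     ppArg[1] = requested rowid, or NULL
+**     ppArg[2] = value for column 0
+**     ppArg[3] = value for column 1
+**     ppArg[4] = value for the magic table-name column (ignored)
+**     ppArg[5] = requested docid, or NULL
+**
+** Supplying both ppArg[1] and ppArg[5] is the case rejected with
+** SQLITE_ERROR just below.
+**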
+ */ + rc = SQLITE_ERROR; + }else{ + if( SQLITE_NULL == sqlite3_value_type(pRequestDocid) ){ + pRequestDocid = ppArg[1]; + } + rc = index_insert(v, pRequestDocid, &ppArg[2], pRowid); + } + } + + return rc; +} + +static int fulltextSync(sqlite3_vtab *pVtab){ + TRACE(("FTS3 xSync()\n")); + return flushPendingTerms((fulltext_vtab *)pVtab); +} + +static int fulltextBegin(sqlite3_vtab *pVtab){ + fulltext_vtab *v = (fulltext_vtab *) pVtab; + TRACE(("FTS3 xBegin()\n")); + + /* Any buffered updates should have been cleared by the previous + ** transaction. + */ + assert( v->nPendingData<0 ); + return clearPendingTerms(v); +} + +static int fulltextCommit(sqlite3_vtab *pVtab){ + fulltext_vtab *v = (fulltext_vtab *) pVtab; + TRACE(("FTS3 xCommit()\n")); + + /* Buffered updates should have been cleared by fulltextSync(). */ + assert( v->nPendingData<0 ); + return clearPendingTerms(v); +} + +static int fulltextRollback(sqlite3_vtab *pVtab){ + TRACE(("FTS3 xRollback()\n")); + return clearPendingTerms((fulltext_vtab *)pVtab); +} + +/* +** Implementation of the snippet() function for FTS3 +*/ +static void snippetFunc( + sqlite3_context *pContext, + int argc, + sqlite3_value **argv +){ + fulltext_cursor *pCursor; + if( argc<1 ) return; + if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || + sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ + sqlite3_result_error(pContext, "illegal first argument to html_snippet",-1); + }else{ + const char *zStart = ""; + const char *zEnd = ""; + const char *zEllipsis = "..."; + memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); + if( argc>=2 ){ + zStart = (const char*)sqlite3_value_text(argv[1]); + if( argc>=3 ){ + zEnd = (const char*)sqlite3_value_text(argv[2]); + if( argc>=4 ){ + zEllipsis = (const char*)sqlite3_value_text(argv[3]); + } + } + } + snippetAllOffsets(pCursor); + snippetText(pCursor, zStart, zEnd, zEllipsis); + sqlite3_result_text(pContext, pCursor->snippet.zSnippet, + pCursor->snippet.nSnippet, SQLITE_STATIC); + } +} + +/* +** Implementation of the offsets() function for FTS3 +*/ +static void snippetOffsetsFunc( + sqlite3_context *pContext, + int argc, + sqlite3_value **argv +){ + fulltext_cursor *pCursor; + if( argc<1 ) return; + if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || + sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ + sqlite3_result_error(pContext, "illegal first argument to offsets",-1); + }else{ + memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); + snippetAllOffsets(pCursor); + snippetOffsetText(&pCursor->snippet); + sqlite3_result_text(pContext, + pCursor->snippet.zOffset, pCursor->snippet.nOffset, + SQLITE_STATIC); + } +} + +/* +** This routine implements the xFindFunction method for the FTS3 +** virtual table. +*/ +static int fulltextFindFunction( + sqlite3_vtab *pVtab, + int nArg, + const char *zName, + void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), + void **ppArg +){ + if( strcmp(zName,"snippet")==0 ){ + *pxFunc = snippetFunc; + return 1; + }else if( strcmp(zName,"offsets")==0 ){ + *pxFunc = snippetOffsetsFunc; + return 1; + } + return 0; +} + +/* +** Rename an fts3 table. 
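+**
+** For example (illustrative), renaming an fts3 table "mail" to "mail2"
+** in the main database makes the statement built below rename the three
+** shadow tables:
+**
+**     main.'mail_content'    ->  'mail2_content'
+**     main.'mail_segments'   ->  'mail2_segments'
+**     main.'mail_segdir'     ->  'mail2_segdir'
+**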
+*/ +static int fulltextRename( + sqlite3_vtab *pVtab, + const char *zName +){ + fulltext_vtab *p = (fulltext_vtab *)pVtab; + int rc = SQLITE_NOMEM; + char *zSql = sqlite3_mprintf( + "ALTER TABLE %Q.'%q_content' RENAME TO '%q_content';" + "ALTER TABLE %Q.'%q_segments' RENAME TO '%q_segments';" + "ALTER TABLE %Q.'%q_segdir' RENAME TO '%q_segdir';" + , p->zDb, p->zName, zName + , p->zDb, p->zName, zName + , p->zDb, p->zName, zName + ); + if( zSql ){ + rc = sqlite3_exec(p->db, zSql, 0, 0, 0); + sqlite3_free(zSql); + } + return rc; +} + +static const sqlite3_module fts3Module = { + /* iVersion */ 0, + /* xCreate */ fulltextCreate, + /* xConnect */ fulltextConnect, + /* xBestIndex */ fulltextBestIndex, + /* xDisconnect */ fulltextDisconnect, + /* xDestroy */ fulltextDestroy, + /* xOpen */ fulltextOpen, + /* xClose */ fulltextClose, + /* xFilter */ fulltextFilter, + /* xNext */ fulltextNext, + /* xEof */ fulltextEof, + /* xColumn */ fulltextColumn, + /* xRowid */ fulltextRowid, + /* xUpdate */ fulltextUpdate, + /* xBegin */ fulltextBegin, + /* xSync */ fulltextSync, + /* xCommit */ fulltextCommit, + /* xRollback */ fulltextRollback, + /* xFindFunction */ fulltextFindFunction, + /* xRename */ fulltextRename, +}; + +static void hashDestroy(void *p){ + fts3Hash *pHash = (fts3Hash *)p; + sqlite3Fts3HashClear(pHash); + sqlite3_free(pHash); +} + +/* +** The fts3 built-in tokenizers - "simple" and "porter" - are implemented +** in files fts3_tokenizer1.c and fts3_porter.c respectively. The following +** two forward declarations are for functions declared in these files +** used to retrieve the respective implementations. +** +** Calling sqlite3Fts3SimpleTokenizerModule() sets the value pointed +** to by the argument to point a the "simple" tokenizer implementation. +** Function ...PorterTokenizerModule() sets *pModule to point to the +** porter tokenizer/stemmer implementation. +*/ +void sqlite3Fts3SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule); +void sqlite3Fts3PorterTokenizerModule(sqlite3_tokenizer_module const**ppModule); +void sqlite3Fts3IcuTokenizerModule(sqlite3_tokenizer_module const**ppModule); + +int sqlite3Fts3InitHashTable(sqlite3 *, fts3Hash *, const char *); + +/* +** Initialise the fts3 extension. If this extension is built as part +** of the sqlite library, then this function is called directly by +** SQLite. If fts3 is built as a dynamically loadable extension, this +** function is called by the sqlite3_extension_init() entry point. +*/ +int sqlite3Fts3Init(sqlite3 *db){ + int rc = SQLITE_OK; + fts3Hash *pHash = 0; + const sqlite3_tokenizer_module *pSimple = 0; + const sqlite3_tokenizer_module *pPorter = 0; + const sqlite3_tokenizer_module *pIcu = 0; + + sqlite3Fts3SimpleTokenizerModule(&pSimple); + sqlite3Fts3PorterTokenizerModule(&pPorter); +#ifdef SQLITE_ENABLE_ICU + sqlite3Fts3IcuTokenizerModule(&pIcu); +#endif + + /* Allocate and initialise the hash-table used to store tokenizers. */ + pHash = sqlite3_malloc(sizeof(fts3Hash)); + if( !pHash ){ + rc = SQLITE_NOMEM; + }else{ + sqlite3Fts3HashInit(pHash, FTS3_HASH_STRING, 1); + } + + /* Load the built-in tokenizers into the hash table */ + if( rc==SQLITE_OK ){ + if( sqlite3Fts3HashInsert(pHash, "simple", 7, (void *)pSimple) + || sqlite3Fts3HashInsert(pHash, "porter", 7, (void *)pPorter) + || (pIcu && sqlite3Fts3HashInsert(pHash, "icu", 4, (void *)pIcu)) + ){ + rc = SQLITE_NOMEM; + } + } + + /* Create the virtual table wrapper around the hash-table and overload + ** the two scalar functions. 
If this is successful, register the + ** module with sqlite. + */ + if( SQLITE_OK==rc + && SQLITE_OK==(rc = sqlite3Fts3InitHashTable(db, pHash, "fts3_tokenizer")) + && SQLITE_OK==(rc = sqlite3_overload_function(db, "snippet", -1)) + && SQLITE_OK==(rc = sqlite3_overload_function(db, "offsets", -1)) + ){ + return sqlite3_create_module_v2( + db, "fts3", &fts3Module, (void *)pHash, hashDestroy + ); + } + + /* An error has occured. Delete the hash table and return the error code. */ + assert( rc!=SQLITE_OK ); + if( pHash ){ + sqlite3Fts3HashClear(pHash); + sqlite3_free(pHash); + } + return rc; +} + +#if !SQLITE_CORE +int sqlite3_extension_init( + sqlite3 *db, + char **pzErrMsg, + const sqlite3_api_routines *pApi +){ + SQLITE_EXTENSION_INIT2(pApi) + return sqlite3Fts3Init(db); +} +#endif + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3.h b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3.h new file mode 100644 index 0000000000..c1aa8caf09 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3.h @@ -0,0 +1,26 @@ +/* +** 2006 Oct 10 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This header file is used by programs that want to link against the +** FTS3 library. All it does is declare the sqlite3Fts3Init() interface. +*/ +#include "sqlite3.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +int sqlite3Fts3Init(sqlite3 *db); + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3_hash.c b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3_hash.c new file mode 100644 index 0000000000..b14511a3c9 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3_hash.c @@ -0,0 +1,373 @@ +/* +** 2001 September 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This is the implementation of generic hash-tables used in SQLite. +** We've modified it slightly to serve as a standalone hash table +** implementation for the full-text indexing module. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS3 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS3 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). +*/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) + +#include +#include +#include + +#include "fts3_hash.h" + +/* +** Malloc and Free functions +*/ +static void *fts3HashMalloc(int n){ + void *p = sqlite3_malloc(n); + if( p ){ + memset(p, 0, n); + } + return p; +} +static void fts3HashFree(void *p){ + sqlite3_free(p); +} + +/* Turn bulk memory into a hash table object by initializing the +** fields of the Hash structure. +** +** "pNew" is a pointer to the hash table that is to be initialized. +** keyClass is one of the constants +** FTS3_HASH_BINARY or FTS3_HASH_STRING. 
The value of keyClass +** determines what kind of key the hash table will use. "copyKey" is +** true if the hash table should make its own private copy of keys and +** false if it should just use the supplied pointer. +*/ +void sqlite3Fts3HashInit(fts3Hash *pNew, int keyClass, int copyKey){ + assert( pNew!=0 ); + assert( keyClass>=FTS3_HASH_STRING && keyClass<=FTS3_HASH_BINARY ); + pNew->keyClass = keyClass; + pNew->copyKey = copyKey; + pNew->first = 0; + pNew->count = 0; + pNew->htsize = 0; + pNew->ht = 0; +} + +/* Remove all entries from a hash table. Reclaim all memory. +** Call this routine to delete a hash table or to reset a hash table +** to the empty state. +*/ +void sqlite3Fts3HashClear(fts3Hash *pH){ + fts3HashElem *elem; /* For looping over all elements of the table */ + + assert( pH!=0 ); + elem = pH->first; + pH->first = 0; + fts3HashFree(pH->ht); + pH->ht = 0; + pH->htsize = 0; + while( elem ){ + fts3HashElem *next_elem = elem->next; + if( pH->copyKey && elem->pKey ){ + fts3HashFree(elem->pKey); + } + fts3HashFree(elem); + elem = next_elem; + } + pH->count = 0; +} + +/* +** Hash and comparison functions when the mode is FTS3_HASH_STRING +*/ +static int fts3StrHash(const void *pKey, int nKey){ + const char *z = (const char *)pKey; + int h = 0; + if( nKey<=0 ) nKey = (int) strlen(z); + while( nKey > 0 ){ + h = (h<<3) ^ h ^ *z++; + nKey--; + } + return h & 0x7fffffff; +} +static int fts3StrCompare(const void *pKey1, int n1, const void *pKey2, int n2){ + if( n1!=n2 ) return 1; + return strncmp((const char*)pKey1,(const char*)pKey2,n1); +} + +/* +** Hash and comparison functions when the mode is FTS3_HASH_BINARY +*/ +static int fts3BinHash(const void *pKey, int nKey){ + int h = 0; + const char *z = (const char *)pKey; + while( nKey-- > 0 ){ + h = (h<<3) ^ h ^ *(z++); + } + return h & 0x7fffffff; +} +static int fts3BinCompare(const void *pKey1, int n1, const void *pKey2, int n2){ + if( n1!=n2 ) return 1; + return memcmp(pKey1,pKey2,n1); +} + +/* +** Return a pointer to the appropriate hash function given the key class. +** +** The C syntax in this function definition may be unfamilar to some +** programmers, so we provide the following additional explanation: +** +** The name of the function is "hashFunction". The function takes a +** single parameter "keyClass". The return value of hashFunction() +** is a pointer to another function. Specifically, the return value +** of hashFunction() is a pointer to a function that takes two parameters +** with types "const void*" and "int" and returns an "int". +*/ +static int (*hashFunction(int keyClass))(const void*,int){ + if( keyClass==FTS3_HASH_STRING ){ + return &fts3StrHash; + }else{ + assert( keyClass==FTS3_HASH_BINARY ); + return &fts3BinHash; + } +} + +/* +** Return a pointer to the appropriate hash function given the key class. +** +** For help in interpreted the obscure C code in the function definition, +** see the header comment on the previous function. 
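+**
+** Equivalently (illustrative only; the typedef names are not used in
+** this file), the two selector functions could have been declared as:
+**
+**     typedef int (*fts3_hash_fn)(const void*, int);
+**     typedef int (*fts3_cmp_fn)(const void*, int, const void*, int);
+**
+**     static fts3_hash_fn hashFunction(int keyClass);
+**     static fts3_cmp_fn  compareFunction(int keyClass);
+**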
+*/ +static int (*compareFunction(int keyClass))(const void*,int,const void*,int){ + if( keyClass==FTS3_HASH_STRING ){ + return &fts3StrCompare; + }else{ + assert( keyClass==FTS3_HASH_BINARY ); + return &fts3BinCompare; + } +} + +/* Link an element into the hash table +*/ +static void fts3HashInsertElement( + fts3Hash *pH, /* The complete hash table */ + struct _fts3ht *pEntry, /* The entry into which pNew is inserted */ + fts3HashElem *pNew /* The element to be inserted */ +){ + fts3HashElem *pHead; /* First element already in pEntry */ + pHead = pEntry->chain; + if( pHead ){ + pNew->next = pHead; + pNew->prev = pHead->prev; + if( pHead->prev ){ pHead->prev->next = pNew; } + else { pH->first = pNew; } + pHead->prev = pNew; + }else{ + pNew->next = pH->first; + if( pH->first ){ pH->first->prev = pNew; } + pNew->prev = 0; + pH->first = pNew; + } + pEntry->count++; + pEntry->chain = pNew; +} + + +/* Resize the hash table so that it cantains "new_size" buckets. +** "new_size" must be a power of 2. The hash table might fail +** to resize if sqliteMalloc() fails. +*/ +static void fts3Rehash(fts3Hash *pH, int new_size){ + struct _fts3ht *new_ht; /* The new hash table */ + fts3HashElem *elem, *next_elem; /* For looping over existing elements */ + int (*xHash)(const void*,int); /* The hash function */ + + assert( (new_size & (new_size-1))==0 ); + new_ht = (struct _fts3ht *)fts3HashMalloc( new_size*sizeof(struct _fts3ht) ); + if( new_ht==0 ) return; + fts3HashFree(pH->ht); + pH->ht = new_ht; + pH->htsize = new_size; + xHash = hashFunction(pH->keyClass); + for(elem=pH->first, pH->first=0; elem; elem = next_elem){ + int h = (*xHash)(elem->pKey, elem->nKey) & (new_size-1); + next_elem = elem->next; + fts3HashInsertElement(pH, &new_ht[h], elem); + } +} + +/* This function (for internal use only) locates an element in an +** hash table that matches the given key. The hash for this key has +** already been computed and is passed as the 4th parameter. +*/ +static fts3HashElem *fts3FindElementByHash( + const fts3Hash *pH, /* The pH to be searched */ + const void *pKey, /* The key we are searching for */ + int nKey, + int h /* The hash for this key. */ +){ + fts3HashElem *elem; /* Used to loop thru the element list */ + int count; /* Number of elements left to test */ + int (*xCompare)(const void*,int,const void*,int); /* comparison function */ + + if( pH->ht ){ + struct _fts3ht *pEntry = &pH->ht[h]; + elem = pEntry->chain; + count = pEntry->count; + xCompare = compareFunction(pH->keyClass); + while( count-- && elem ){ + if( (*xCompare)(elem->pKey,elem->nKey,pKey,nKey)==0 ){ + return elem; + } + elem = elem->next; + } + } + return 0; +} + +/* Remove a single entry from the hash table given a pointer to that +** element and a hash on the element's key. 
+*/ +static void fts3RemoveElementByHash( + fts3Hash *pH, /* The pH containing "elem" */ + fts3HashElem* elem, /* The element to be removed from the pH */ + int h /* Hash value for the element */ +){ + struct _fts3ht *pEntry; + if( elem->prev ){ + elem->prev->next = elem->next; + }else{ + pH->first = elem->next; + } + if( elem->next ){ + elem->next->prev = elem->prev; + } + pEntry = &pH->ht[h]; + if( pEntry->chain==elem ){ + pEntry->chain = elem->next; + } + pEntry->count--; + if( pEntry->count<=0 ){ + pEntry->chain = 0; + } + if( pH->copyKey && elem->pKey ){ + fts3HashFree(elem->pKey); + } + fts3HashFree( elem ); + pH->count--; + if( pH->count<=0 ){ + assert( pH->first==0 ); + assert( pH->count==0 ); + fts3HashClear(pH); + } +} + +/* Attempt to locate an element of the hash table pH with a key +** that matches pKey,nKey. Return the data for this element if it is +** found, or NULL if there is no match. +*/ +void *sqlite3Fts3HashFind(const fts3Hash *pH, const void *pKey, int nKey){ + int h; /* A hash on key */ + fts3HashElem *elem; /* The element that matches key */ + int (*xHash)(const void*,int); /* The hash function */ + + if( pH==0 || pH->ht==0 ) return 0; + xHash = hashFunction(pH->keyClass); + assert( xHash!=0 ); + h = (*xHash)(pKey,nKey); + assert( (pH->htsize & (pH->htsize-1))==0 ); + elem = fts3FindElementByHash(pH,pKey,nKey, h & (pH->htsize-1)); + return elem ? elem->data : 0; +} + +/* Insert an element into the hash table pH. The key is pKey,nKey +** and the data is "data". +** +** If no element exists with a matching key, then a new +** element is created. A copy of the key is made if the copyKey +** flag is set. NULL is returned. +** +** If another element already exists with the same key, then the +** new data replaces the old data and the old data is returned. +** The key is not copied in this instance. If a malloc fails, then +** the new data is returned and the hash table is unchanged. +** +** If the "data" parameter to this function is NULL, then the +** element corresponding to "key" is removed from the hash table. 
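+**
+** Minimal usage sketch (illustrative; pNew and pOld are hypothetical
+** payload pointers):
+**
+**     fts3Hash h;
+**     sqlite3Fts3HashInit(&h, FTS3_HASH_STRING, 1);      (copyKey=1)
+**     sqlite3Fts3HashInsert(&h, "term", 5, pNew);        (insert)
+**     pOld = sqlite3Fts3HashFind(&h, "term", 5);         (returns pNew)
+**     sqlite3Fts3HashInsert(&h, "term", 5, 0);           (delete)
+**     sqlite3Fts3HashClear(&h);
+**
+** nKey must match between insert and lookup; here it counts the
+** terminating NUL.
+**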
+*/ +void *sqlite3Fts3HashInsert( + fts3Hash *pH, /* The hash table to insert into */ + const void *pKey, /* The key */ + int nKey, /* Number of bytes in the key */ + void *data /* The data */ +){ + int hraw; /* Raw hash value of the key */ + int h; /* the hash of the key modulo hash table size */ + fts3HashElem *elem; /* Used to loop thru the element list */ + fts3HashElem *new_elem; /* New element added to the pH */ + int (*xHash)(const void*,int); /* The hash function */ + + assert( pH!=0 ); + xHash = hashFunction(pH->keyClass); + assert( xHash!=0 ); + hraw = (*xHash)(pKey, nKey); + assert( (pH->htsize & (pH->htsize-1))==0 ); + h = hraw & (pH->htsize-1); + elem = fts3FindElementByHash(pH,pKey,nKey,h); + if( elem ){ + void *old_data = elem->data; + if( data==0 ){ + fts3RemoveElementByHash(pH,elem,h); + }else{ + elem->data = data; + } + return old_data; + } + if( data==0 ) return 0; + new_elem = (fts3HashElem*)fts3HashMalloc( sizeof(fts3HashElem) ); + if( new_elem==0 ) return data; + if( pH->copyKey && pKey!=0 ){ + new_elem->pKey = fts3HashMalloc( nKey ); + if( new_elem->pKey==0 ){ + fts3HashFree(new_elem); + return data; + } + memcpy((void*)new_elem->pKey, pKey, nKey); + }else{ + new_elem->pKey = (void*)pKey; + } + new_elem->nKey = nKey; + pH->count++; + if( pH->htsize==0 ){ + fts3Rehash(pH,8); + if( pH->htsize==0 ){ + pH->count = 0; + fts3HashFree(new_elem); + return data; + } + } + if( pH->count > pH->htsize ){ + fts3Rehash(pH,pH->htsize*2); + } + assert( pH->htsize>0 ); + assert( (pH->htsize & (pH->htsize-1))==0 ); + h = hraw & (pH->htsize-1); + fts3HashInsertElement(pH, &pH->ht[h], new_elem); + new_elem->data = data; + return 0; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3_hash.h b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3_hash.h new file mode 100644 index 0000000000..e01954edc8 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3_hash.h @@ -0,0 +1,110 @@ +/* +** 2001 September 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This is the header file for the generic hash-table implemenation +** used in SQLite. We've modified it slightly to serve as a standalone +** hash table implementation for the full-text indexing module. +** +*/ +#ifndef _FTS3_HASH_H_ +#define _FTS3_HASH_H_ + +/* Forward declarations of structures. */ +typedef struct fts3Hash fts3Hash; +typedef struct fts3HashElem fts3HashElem; + +/* A complete hash table is an instance of the following structure. +** The internals of this structure are intended to be opaque -- client +** code should not attempt to access or modify the fields of this structure +** directly. Change this structure only by using the routines below. +** However, many of the "procedures" and "functions" for modifying and +** accessing this structure are really macros, so we can't really make +** this structure opaque. 
+*/ +struct fts3Hash { + char keyClass; /* HASH_INT, _POINTER, _STRING, _BINARY */ + char copyKey; /* True if copy of key made on insert */ + int count; /* Number of entries in this table */ + fts3HashElem *first; /* The first element of the array */ + int htsize; /* Number of buckets in the hash table */ + struct _fts3ht { /* the hash table */ + int count; /* Number of entries with this hash */ + fts3HashElem *chain; /* Pointer to first entry with this hash */ + } *ht; +}; + +/* Each element in the hash table is an instance of the following +** structure. All elements are stored on a single doubly-linked list. +** +** Again, this structure is intended to be opaque, but it can't really +** be opaque because it is used by macros. +*/ +struct fts3HashElem { + fts3HashElem *next, *prev; /* Next and previous elements in the table */ + void *data; /* Data associated with this element */ + void *pKey; int nKey; /* Key associated with this element */ +}; + +/* +** There are 2 different modes of operation for a hash table: +** +** FTS3_HASH_STRING pKey points to a string that is nKey bytes long +** (including the null-terminator, if any). Case +** is respected in comparisons. +** +** FTS3_HASH_BINARY pKey points to binary data nKey bytes long. +** memcmp() is used to compare keys. +** +** A copy of the key is made if the copyKey parameter to fts3HashInit is 1. +*/ +#define FTS3_HASH_STRING 1 +#define FTS3_HASH_BINARY 2 + +/* +** Access routines. To delete, insert a NULL pointer. +*/ +void sqlite3Fts3HashInit(fts3Hash*, int keytype, int copyKey); +void *sqlite3Fts3HashInsert(fts3Hash*, const void *pKey, int nKey, void *pData); +void *sqlite3Fts3HashFind(const fts3Hash*, const void *pKey, int nKey); +void sqlite3Fts3HashClear(fts3Hash*); + +/* +** Shorthand for the functions above +*/ +#define fts3HashInit sqlite3Fts3HashInit +#define fts3HashInsert sqlite3Fts3HashInsert +#define fts3HashFind sqlite3Fts3HashFind +#define fts3HashClear sqlite3Fts3HashClear + +/* +** Macros for looping over all elements of a hash table. The idiom is +** like this: +** +** fts3Hash h; +** fts3HashElem *p; +** ... +** for(p=fts3HashFirst(&h); p; p=fts3HashNext(p)){ +** SomeStructure *pData = fts3HashData(p); +** // do something with pData +** } +*/ +#define fts3HashFirst(H) ((H)->first) +#define fts3HashNext(E) ((E)->next) +#define fts3HashData(E) ((E)->data) +#define fts3HashKey(E) ((E)->pKey) +#define fts3HashKeysize(E) ((E)->nKey) + +/* +** Number of entries in a hash table +*/ +#define fts3HashCount(H) ((H)->count) + +#endif /* _FTS3_HASH_H_ */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3_icu.c b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3_icu.c new file mode 100644 index 0000000000..86a9a50cfd --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3_icu.c @@ -0,0 +1,257 @@ +/* +** 2007 June 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file implements a tokenizer for fts3 based on the ICU library. 
+** +** $Id: fts3_icu.c,v 1.1 2007/08/20 17:37:04 shess Exp $ +*/ + +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) +#ifdef SQLITE_ENABLE_ICU + +#include +#include +#include "fts3_tokenizer.h" + +#include +#include +#include +#include + +typedef struct IcuTokenizer IcuTokenizer; +typedef struct IcuCursor IcuCursor; + +struct IcuTokenizer { + sqlite3_tokenizer base; + char *zLocale; +}; + +struct IcuCursor { + sqlite3_tokenizer_cursor base; + + UBreakIterator *pIter; /* ICU break-iterator object */ + int nChar; /* Number of UChar elements in pInput */ + UChar *aChar; /* Copy of input using utf-16 encoding */ + int *aOffset; /* Offsets of each character in utf-8 input */ + + int nBuffer; + char *zBuffer; + + int iToken; +}; + +/* +** Create a new tokenizer instance. +*/ +static int icuCreate( + int argc, /* Number of entries in argv[] */ + const char * const *argv, /* Tokenizer creation arguments */ + sqlite3_tokenizer **ppTokenizer /* OUT: Created tokenizer */ +){ + IcuTokenizer *p; + int n = 0; + + if( argc>0 ){ + n = strlen(argv[0])+1; + } + p = (IcuTokenizer *)sqlite3_malloc(sizeof(IcuTokenizer)+n); + if( !p ){ + return SQLITE_NOMEM; + } + memset(p, 0, sizeof(IcuTokenizer)); + + if( n ){ + p->zLocale = (char *)&p[1]; + memcpy(p->zLocale, argv[0], n); + } + + *ppTokenizer = (sqlite3_tokenizer *)p; + + return SQLITE_OK; +} + +/* +** Destroy a tokenizer +*/ +static int icuDestroy(sqlite3_tokenizer *pTokenizer){ + IcuTokenizer *p = (IcuTokenizer *)pTokenizer; + sqlite3_free(p); + return SQLITE_OK; +} + +/* +** Prepare to begin tokenizing a particular string. The input +** string to be tokenized is pInput[0..nBytes-1]. A cursor +** used to incrementally tokenize this string is returned in +** *ppCursor. +*/ +static int icuOpen( + sqlite3_tokenizer *pTokenizer, /* The tokenizer */ + const char *zInput, /* Input string */ + int nInput, /* Length of zInput in bytes */ + sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ +){ + IcuTokenizer *p = (IcuTokenizer *)pTokenizer; + IcuCursor *pCsr; + + const int32_t opt = U_FOLD_CASE_DEFAULT; + UErrorCode status = U_ZERO_ERROR; + int nChar; + + UChar32 c; + int iInput = 0; + int iOut = 0; + + *ppCursor = 0; + + nChar = nInput+1; + pCsr = (IcuCursor *)sqlite3_malloc( + sizeof(IcuCursor) + /* IcuCursor */ + nChar * sizeof(UChar) + /* IcuCursor.aChar[] */ + (nChar+1) * sizeof(int) /* IcuCursor.aOffset[] */ + ); + if( !pCsr ){ + return SQLITE_NOMEM; + } + memset(pCsr, 0, sizeof(IcuCursor)); + pCsr->aChar = (UChar *)&pCsr[1]; + pCsr->aOffset = (int *)&pCsr->aChar[nChar]; + + pCsr->aOffset[iOut] = iInput; + U8_NEXT(zInput, iInput, nInput, c); + while( c>0 ){ + int isError = 0; + c = u_foldCase(c, opt); + U16_APPEND(pCsr->aChar, iOut, nChar, c, isError); + if( isError ){ + sqlite3_free(pCsr); + return SQLITE_ERROR; + } + pCsr->aOffset[iOut] = iInput; + + if( iInputpIter = ubrk_open(UBRK_WORD, p->zLocale, pCsr->aChar, iOut, &status); + if( !U_SUCCESS(status) ){ + sqlite3_free(pCsr); + return SQLITE_ERROR; + } + pCsr->nChar = iOut; + + ubrk_first(pCsr->pIter); + *ppCursor = (sqlite3_tokenizer_cursor *)pCsr; + return SQLITE_OK; +} + +/* +** Close a tokenization cursor previously opened by a call to icuOpen(). +*/ +static int icuClose(sqlite3_tokenizer_cursor *pCursor){ + IcuCursor *pCsr = (IcuCursor *)pCursor; + ubrk_close(pCsr->pIter); + sqlite3_free(pCsr->zBuffer); + sqlite3_free(pCsr); + return SQLITE_OK; +} + +/* +** Extract the next token from a tokenization cursor. 
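+**
+** Worked example (illustrative): tokenizing the input "Hello World"
+** yields on the first call roughly
+**
+**     *ppToken       = "hello"   (case-folded copy held in pCsr->zBuffer)
+**     *pnBytes       = 5
+**     *piStartOffset = 0         (byte offsets into the original UTF-8)
+**     *piEndOffset   = 5
+**     *piPosition    = 0
+**
+** and on the second call "world" at offsets 6..11 with position 1.
+**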
+*/ +static int icuNext( + sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by simpleOpen */ + const char **ppToken, /* OUT: *ppToken is the token text */ + int *pnBytes, /* OUT: Number of bytes in token */ + int *piStartOffset, /* OUT: Starting offset of token */ + int *piEndOffset, /* OUT: Ending offset of token */ + int *piPosition /* OUT: Position integer of token */ +){ + IcuCursor *pCsr = (IcuCursor *)pCursor; + + int iStart = 0; + int iEnd = 0; + int nByte = 0; + + while( iStart==iEnd ){ + UChar32 c; + + iStart = ubrk_current(pCsr->pIter); + iEnd = ubrk_next(pCsr->pIter); + if( iEnd==UBRK_DONE ){ + return SQLITE_DONE; + } + + while( iStartaChar, iWhite, pCsr->nChar, c); + if( u_isspace(c) ){ + iStart = iWhite; + }else{ + break; + } + } + assert(iStart<=iEnd); + } + + do { + UErrorCode status = U_ZERO_ERROR; + if( nByte ){ + char *zNew = sqlite3_realloc(pCsr->zBuffer, nByte); + if( !zNew ){ + return SQLITE_NOMEM; + } + pCsr->zBuffer = zNew; + pCsr->nBuffer = nByte; + } + + u_strToUTF8( + pCsr->zBuffer, pCsr->nBuffer, &nByte, /* Output vars */ + &pCsr->aChar[iStart], iEnd-iStart, /* Input vars */ + &status /* Output success/failure */ + ); + } while( nByte>pCsr->nBuffer ); + + *ppToken = pCsr->zBuffer; + *pnBytes = nByte; + *piStartOffset = pCsr->aOffset[iStart]; + *piEndOffset = pCsr->aOffset[iEnd]; + *piPosition = pCsr->iToken++; + + return SQLITE_OK; +} + +/* +** The set of routines that implement the simple tokenizer +*/ +static const sqlite3_tokenizer_module icuTokenizerModule = { + 0, /* iVersion */ + icuCreate, /* xCreate */ + icuDestroy, /* xCreate */ + icuOpen, /* xOpen */ + icuClose, /* xClose */ + icuNext, /* xNext */ +}; + +/* +** Set *ppModule to point at the implementation of the ICU tokenizer. +*/ +void sqlite3Fts3IcuTokenizerModule( + sqlite3_tokenizer_module const**ppModule +){ + *ppModule = &icuTokenizerModule; +} + +#endif /* defined(SQLITE_ENABLE_ICU) */ +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3_porter.c b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3_porter.c new file mode 100644 index 0000000000..14e129f4b7 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3_porter.c @@ -0,0 +1,642 @@ +/* +** 2006 September 30 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** Implementation of the full-text-search tokenizer that implements +** a Porter stemmer. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS3 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS3 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). 
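+**
+** A hedged sketch of the two build modes (the commands are illustrative
+** and not part of this tree):
+**
+**     cc -DSQLITE_CORE -DSQLITE_ENABLE_FTS3 -c fts3_porter.c
+**         (compiled into the SQLite core)
+**
+**     cc -fPIC -shared -o fts3.so fts3*.c
+**         (built as a loadable extension; SQLITE_CORE left undefined)
+**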
+*/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) + + +#include +#include +#include +#include +#include + +#include "fts3_tokenizer.h" + +/* +** Class derived from sqlite3_tokenizer +*/ +typedef struct porter_tokenizer { + sqlite3_tokenizer base; /* Base class */ +} porter_tokenizer; + +/* +** Class derived from sqlit3_tokenizer_cursor +*/ +typedef struct porter_tokenizer_cursor { + sqlite3_tokenizer_cursor base; + const char *zInput; /* input we are tokenizing */ + int nInput; /* size of the input */ + int iOffset; /* current position in zInput */ + int iToken; /* index of next token to be returned */ + char *zToken; /* storage for current token */ + int nAllocated; /* space allocated to zToken buffer */ +} porter_tokenizer_cursor; + + +/* Forward declaration */ +static const sqlite3_tokenizer_module porterTokenizerModule; + + +/* +** Create a new tokenizer instance. +*/ +static int porterCreate( + int argc, const char * const *argv, + sqlite3_tokenizer **ppTokenizer +){ + porter_tokenizer *t; + t = (porter_tokenizer *) calloc(sizeof(*t), 1); + if( t==NULL ) return SQLITE_NOMEM; + + *ppTokenizer = &t->base; + return SQLITE_OK; +} + +/* +** Destroy a tokenizer +*/ +static int porterDestroy(sqlite3_tokenizer *pTokenizer){ + free(pTokenizer); + return SQLITE_OK; +} + +/* +** Prepare to begin tokenizing a particular string. The input +** string to be tokenized is zInput[0..nInput-1]. A cursor +** used to incrementally tokenize this string is returned in +** *ppCursor. +*/ +static int porterOpen( + sqlite3_tokenizer *pTokenizer, /* The tokenizer */ + const char *zInput, int nInput, /* String to be tokenized */ + sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ +){ + porter_tokenizer_cursor *c; + + c = (porter_tokenizer_cursor *) malloc(sizeof(*c)); + if( c==NULL ) return SQLITE_NOMEM; + + c->zInput = zInput; + if( zInput==0 ){ + c->nInput = 0; + }else if( nInput<0 ){ + c->nInput = (int)strlen(zInput); + }else{ + c->nInput = nInput; + } + c->iOffset = 0; /* start tokenizing at the beginning */ + c->iToken = 0; + c->zToken = NULL; /* no space allocated, yet. */ + c->nAllocated = 0; + + *ppCursor = &c->base; + return SQLITE_OK; +} + +/* +** Close a tokenization cursor previously opened by a call to +** porterOpen() above. +*/ +static int porterClose(sqlite3_tokenizer_cursor *pCursor){ + porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor; + free(c->zToken); + free(c); + return SQLITE_OK; +} +/* +** Vowel or consonant +*/ +static const char cType[] = { + 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, + 1, 1, 1, 2, 1 +}; + +/* +** isConsonant() and isVowel() determine if their first character in +** the string they point to is a consonant or a vowel, according +** to Porter ruls. +** +** A consonate is any letter other than 'a', 'e', 'i', 'o', or 'u'. +** 'Y' is a consonant unless it follows another consonant, +** in which case it is a vowel. +** +** In these routine, the letters are in reverse order. So the 'y' rule +** is that 'y' is a consonant unless it is followed by another +** consonent. 
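+**
+** Worked example (illustrative): in "happy" the final 'y' follows the
+** consonant 'p', so it counts as a vowel; reversed ("yppah") that 'y'
+** is *followed* by 'p', which is the form the tests below use.  In
+** "yellow" the leading 'y' follows nothing and stays a consonant;
+** reversed ("wolley") it is the last character (z[1]==0), and the code
+** below classifies it as a consonant for the same reason.
+**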
+*/ +static int isVowel(const char*); +static int isConsonant(const char *z){ + int j; + char x = *z; + if( x==0 ) return 0; + assert( x>='a' && x<='z' ); + j = cType[x-'a']; + if( j<2 ) return j; + return z[1]==0 || isVowel(z + 1); +} +static int isVowel(const char *z){ + int j; + char x = *z; + if( x==0 ) return 0; + assert( x>='a' && x<='z' ); + j = cType[x-'a']; + if( j<2 ) return 1-j; + return isConsonant(z + 1); +} + +/* +** Let any sequence of one or more vowels be represented by V and let +** C be sequence of one or more consonants. Then every word can be +** represented as: +** +** [C] (VC){m} [V] +** +** In prose: A word is an optional consonant followed by zero or +** vowel-consonant pairs followed by an optional vowel. "m" is the +** number of vowel consonant pairs. This routine computes the value +** of m for the first i bytes of a word. +** +** Return true if the m-value for z is 1 or more. In other words, +** return true if z contains at least one vowel that is followed +** by a consonant. +** +** In this routine z[] is in reverse order. So we are really looking +** for an instance of of a consonant followed by a vowel. +*/ +static int m_gt_0(const char *z){ + while( isVowel(z) ){ z++; } + if( *z==0 ) return 0; + while( isConsonant(z) ){ z++; } + return *z!=0; +} + +/* Like mgt0 above except we are looking for a value of m which is +** exactly 1 +*/ +static int m_eq_1(const char *z){ + while( isVowel(z) ){ z++; } + if( *z==0 ) return 0; + while( isConsonant(z) ){ z++; } + if( *z==0 ) return 0; + while( isVowel(z) ){ z++; } + if( *z==0 ) return 1; + while( isConsonant(z) ){ z++; } + return *z==0; +} + +/* Like mgt0 above except we are looking for a value of m>1 instead +** or m>0 +*/ +static int m_gt_1(const char *z){ + while( isVowel(z) ){ z++; } + if( *z==0 ) return 0; + while( isConsonant(z) ){ z++; } + if( *z==0 ) return 0; + while( isVowel(z) ){ z++; } + if( *z==0 ) return 0; + while( isConsonant(z) ){ z++; } + return *z!=0; +} + +/* +** Return TRUE if there is a vowel anywhere within z[0..n-1] +*/ +static int hasVowel(const char *z){ + while( isConsonant(z) ){ z++; } + return *z!=0; +} + +/* +** Return TRUE if the word ends in a double consonant. +** +** The text is reversed here. So we are really looking at +** the first two characters of z[]. +*/ +static int doubleConsonant(const char *z){ + return isConsonant(z) && z[0]==z[1] && isConsonant(z+1); +} + +/* +** Return TRUE if the word ends with three letters which +** are consonant-vowel-consonent and where the final consonant +** is not 'w', 'x', or 'y'. +** +** The word is reversed here. So we are really checking the +** first three letters and the first one cannot be in [wxy]. +*/ +static int star_oh(const char *z){ + return + z[0]!=0 && isConsonant(z) && + z[0]!='w' && z[0]!='x' && z[0]!='y' && + z[1]!=0 && isVowel(z+1) && + z[2]!=0 && isConsonant(z+2); +} + +/* +** If the word ends with zFrom and xCond() is true for the stem +** of the word that preceeds the zFrom ending, then change the +** ending to zTo. +** +** The input word *pz and zFrom are both in reverse order. zTo +** is in normal order. +** +** Return TRUE if zFrom matches. Return FALSE if zFrom does not +** match. Not that TRUE is returned even if xCond() fails and +** no substitution occurs. +*/ +static int stem( + char **pz, /* The word being stemmed (Reversed) */ + const char *zFrom, /* If the ending matches this... (Reversed) */ + const char *zTo, /* ... 
change the ending to this (not reversed) */ + int (*xCond)(const char*) /* Condition that must be true */ +){ + char *z = *pz; + while( *zFrom && *zFrom==*z ){ z++; zFrom++; } + if( *zFrom!=0 ) return 0; + if( xCond && !xCond(z) ) return 1; + while( *zTo ){ + *(--z) = *(zTo++); + } + *pz = z; + return 1; +} + +/* +** This is the fallback stemmer used when the porter stemmer is +** inappropriate. The input word is copied into the output with +** US-ASCII case folding. If the input word is too long (more +** than 20 bytes if it contains no digits or more than 6 bytes if +** it contains digits) then word is truncated to 20 or 6 bytes +** by taking 10 or 3 bytes from the beginning and end. +*/ +static void copy_stemmer(const char *zIn, int nIn, char *zOut, int *pnOut){ + int i, mx, j; + int hasDigit = 0; + for(i=0; i='A' && c<='Z' ){ + zOut[i] = c - 'A' + 'a'; + }else{ + if( c>='0' && c<='9' ) hasDigit = 1; + zOut[i] = c; + } + } + mx = hasDigit ? 3 : 10; + if( nIn>mx*2 ){ + for(j=mx, i=nIn-mx; i=sizeof(zReverse)-7 ){ + /* The word is too big or too small for the porter stemmer. + ** Fallback to the copy stemmer */ + copy_stemmer(zIn, nIn, zOut, pnOut); + return; + } + for(i=0, j=sizeof(zReverse)-6; i='A' && c<='Z' ){ + zReverse[j] = c + 'a' - 'A'; + }else if( c>='a' && c<='z' ){ + zReverse[j] = c; + }else{ + /* The use of a character not in [a-zA-Z] means that we fallback + ** to the copy stemmer */ + copy_stemmer(zIn, nIn, zOut, pnOut); + return; + } + } + memset(&zReverse[sizeof(zReverse)-5], 0, 5); + z = &zReverse[j+1]; + + + /* Step 1a */ + if( z[0]=='s' ){ + if( + !stem(&z, "sess", "ss", 0) && + !stem(&z, "sei", "i", 0) && + !stem(&z, "ss", "ss", 0) + ){ + z++; + } + } + + /* Step 1b */ + z2 = z; + if( stem(&z, "dee", "ee", m_gt_0) ){ + /* Do nothing. The work was all in the test */ + }else if( + (stem(&z, "gni", "", hasVowel) || stem(&z, "de", "", hasVowel)) + && z!=z2 + ){ + if( stem(&z, "ta", "ate", 0) || + stem(&z, "lb", "ble", 0) || + stem(&z, "zi", "ize", 0) ){ + /* Do nothing. 
The work was all in the test */ + }else if( doubleConsonant(z) && (*z!='l' && *z!='s' && *z!='z') ){ + z++; + }else if( m_eq_1(z) && star_oh(z) ){ + *(--z) = 'e'; + } + } + + /* Step 1c */ + if( z[0]=='y' && hasVowel(z+1) ){ + z[0] = 'i'; + } + + /* Step 2 */ + switch( z[1] ){ + case 'a': + stem(&z, "lanoita", "ate", m_gt_0) || + stem(&z, "lanoit", "tion", m_gt_0); + break; + case 'c': + stem(&z, "icne", "ence", m_gt_0) || + stem(&z, "icna", "ance", m_gt_0); + break; + case 'e': + stem(&z, "rezi", "ize", m_gt_0); + break; + case 'g': + stem(&z, "igol", "log", m_gt_0); + break; + case 'l': + stem(&z, "ilb", "ble", m_gt_0) || + stem(&z, "illa", "al", m_gt_0) || + stem(&z, "iltne", "ent", m_gt_0) || + stem(&z, "ile", "e", m_gt_0) || + stem(&z, "ilsuo", "ous", m_gt_0); + break; + case 'o': + stem(&z, "noitazi", "ize", m_gt_0) || + stem(&z, "noita", "ate", m_gt_0) || + stem(&z, "rota", "ate", m_gt_0); + break; + case 's': + stem(&z, "msila", "al", m_gt_0) || + stem(&z, "ssenevi", "ive", m_gt_0) || + stem(&z, "ssenluf", "ful", m_gt_0) || + stem(&z, "ssensuo", "ous", m_gt_0); + break; + case 't': + stem(&z, "itila", "al", m_gt_0) || + stem(&z, "itivi", "ive", m_gt_0) || + stem(&z, "itilib", "ble", m_gt_0); + break; + } + + /* Step 3 */ + switch( z[0] ){ + case 'e': + stem(&z, "etaci", "ic", m_gt_0) || + stem(&z, "evita", "", m_gt_0) || + stem(&z, "ezila", "al", m_gt_0); + break; + case 'i': + stem(&z, "itici", "ic", m_gt_0); + break; + case 'l': + stem(&z, "laci", "ic", m_gt_0) || + stem(&z, "luf", "", m_gt_0); + break; + case 's': + stem(&z, "ssen", "", m_gt_0); + break; + } + + /* Step 4 */ + switch( z[1] ){ + case 'a': + if( z[0]=='l' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'c': + if( z[0]=='e' && z[2]=='n' && (z[3]=='a' || z[3]=='e') && m_gt_1(z+4) ){ + z += 4; + } + break; + case 'e': + if( z[0]=='r' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'i': + if( z[0]=='c' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'l': + if( z[0]=='e' && z[2]=='b' && (z[3]=='a' || z[3]=='i') && m_gt_1(z+4) ){ + z += 4; + } + break; + case 'n': + if( z[0]=='t' ){ + if( z[2]=='a' ){ + if( m_gt_1(z+3) ){ + z += 3; + } + }else if( z[2]=='e' ){ + stem(&z, "tneme", "", m_gt_1) || + stem(&z, "tnem", "", m_gt_1) || + stem(&z, "tne", "", m_gt_1); + } + } + break; + case 'o': + if( z[0]=='u' ){ + if( m_gt_1(z+2) ){ + z += 2; + } + }else if( z[3]=='s' || z[3]=='t' ){ + stem(&z, "noi", "", m_gt_1); + } + break; + case 's': + if( z[0]=='m' && z[2]=='i' && m_gt_1(z+3) ){ + z += 3; + } + break; + case 't': + stem(&z, "eta", "", m_gt_1) || + stem(&z, "iti", "", m_gt_1); + break; + case 'u': + if( z[0]=='s' && z[2]=='o' && m_gt_1(z+3) ){ + z += 3; + } + break; + case 'v': + case 'z': + if( z[0]=='e' && z[2]=='i' && m_gt_1(z+3) ){ + z += 3; + } + break; + } + + /* Step 5a */ + if( z[0]=='e' ){ + if( m_gt_1(z+1) ){ + z++; + }else if( m_eq_1(z+1) && !star_oh(z+1) ){ + z++; + } + } + + /* Step 5b */ + if( m_gt_1(z) && z[0]=='l' && z[1]=='l' ){ + z++; + } + + /* z[] is now the stemmed word in reverse order. Flip it back + ** around into forward order and return. + */ + *pnOut = i = strlen(z); + zOut[i] = 0; + while( *z ){ + zOut[--i] = *(z++); + } +} + +/* +** Characters that can be part of a token. We assume any character +** whose value is greater than 0x80 (any UTF character) can be +** part of a token. In other words, delimiters all must have +** values of 0x7f or lower. 
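+**
+** For example, with the table below "full-text" is split at the '-' into
+** the two tokens "full" and "text", while "fts3_porter" stays a single
+** token because digits and '_' are token characters.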
+*/ +static const char porterIdChar[] = { +/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ +}; +#define isDelim(C) (((ch=C)&0x80)==0 && (ch<0x30 || !porterIdChar[ch-0x30])) + +/* +** Extract the next token from a tokenization cursor. The cursor must +** have been opened by a prior call to porterOpen(). +*/ +static int porterNext( + sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by porterOpen */ + const char **pzToken, /* OUT: *pzToken is the token text */ + int *pnBytes, /* OUT: Number of bytes in token */ + int *piStartOffset, /* OUT: Starting offset of token */ + int *piEndOffset, /* OUT: Ending offset of token */ + int *piPosition /* OUT: Position integer of token */ +){ + porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor; + const char *z = c->zInput; + + while( c->iOffsetnInput ){ + int iStartOffset, ch; + + /* Scan past delimiter characters */ + while( c->iOffsetnInput && isDelim(z[c->iOffset]) ){ + c->iOffset++; + } + + /* Count non-delimiter characters. */ + iStartOffset = c->iOffset; + while( c->iOffsetnInput && !isDelim(z[c->iOffset]) ){ + c->iOffset++; + } + + if( c->iOffset>iStartOffset ){ + int n = c->iOffset-iStartOffset; + if( n>c->nAllocated ){ + c->nAllocated = n+20; + c->zToken = realloc(c->zToken, c->nAllocated); + if( c->zToken==NULL ) return SQLITE_NOMEM; + } + porter_stemmer(&z[iStartOffset], n, c->zToken, pnBytes); + *pzToken = c->zToken; + *piStartOffset = iStartOffset; + *piEndOffset = c->iOffset; + *piPosition = c->iToken++; + return SQLITE_OK; + } + } + return SQLITE_DONE; +} + +/* +** The set of routines that implement the porter-stemmer tokenizer +*/ +static const sqlite3_tokenizer_module porterTokenizerModule = { + 0, + porterCreate, + porterDestroy, + porterOpen, + porterClose, + porterNext, +}; + +/* +** Allocate a new porter tokenizer. Return a pointer to the new +** tokenizer in *ppModule +*/ +void sqlite3Fts3PorterTokenizerModule( + sqlite3_tokenizer_module const**ppModule +){ + *ppModule = &porterTokenizerModule; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3_tokenizer.c b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3_tokenizer.c new file mode 100644 index 0000000000..7398227257 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3_tokenizer.c @@ -0,0 +1,371 @@ +/* +** 2007 June 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This is part of an SQLite module implementing full-text search. +** This particular file implements the generic tokenizer interface. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS3 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS3 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). 
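+**
+** As background for the routines below: in SQL, tokenizer implementations
+** are registered and looked up through a scalar function (typically named
+** "fts3_tokenizer"), with the module pointer passed and returned as a
+** blob. Informally:
+**
+**   SELECT fts3_tokenizer(<name>, <pointer>);  -- register a tokenizer
+**   SELECT fts3_tokenizer(<name>);             -- look one up
+**
+** where <pointer> is a blob bound from C code. The registerTokenizer()
+** and queryTokenizer() routines later in this file show the corresponding
+** C calls.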
+*/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) + + +#include "sqlite3.h" +#include "sqlite3ext.h" +SQLITE_EXTENSION_INIT1 + +#include "fts3_hash.h" +#include "fts3_tokenizer.h" +#include + +/* +** Implementation of the SQL scalar function for accessing the underlying +** hash table. This function may be called as follows: +** +** SELECT (); +** SELECT (, ); +** +** where is the name passed as the second argument +** to the sqlite3Fts3InitHashTable() function (e.g. 'fts3_tokenizer'). +** +** If the argument is specified, it must be a blob value +** containing a pointer to be stored as the hash data corresponding +** to the string . If is not specified, then +** the string must already exist in the has table. Otherwise, +** an error is returned. +** +** Whether or not the argument is specified, the value returned +** is a blob containing the pointer stored as the hash data corresponding +** to string (after the hash-table is updated, if applicable). +*/ +static void scalarFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + fts3Hash *pHash; + void *pPtr = 0; + const unsigned char *zName; + int nName; + + assert( argc==1 || argc==2 ); + + pHash = (fts3Hash *)sqlite3_user_data(context); + + zName = sqlite3_value_text(argv[0]); + nName = sqlite3_value_bytes(argv[0])+1; + + if( argc==2 ){ + void *pOld; + int n = sqlite3_value_bytes(argv[1]); + if( n!=sizeof(pPtr) ){ + sqlite3_result_error(context, "argument type mismatch", -1); + return; + } + pPtr = *(void **)sqlite3_value_blob(argv[1]); + pOld = sqlite3Fts3HashInsert(pHash, (void *)zName, nName, pPtr); + if( pOld==pPtr ){ + sqlite3_result_error(context, "out of memory", -1); + return; + } + }else{ + pPtr = sqlite3Fts3HashFind(pHash, zName, nName); + if( !pPtr ){ + char *zErr = sqlite3_mprintf("unknown tokenizer: %s", zName); + sqlite3_result_error(context, zErr, -1); + sqlite3_free(zErr); + return; + } + } + + sqlite3_result_blob(context, (void *)&pPtr, sizeof(pPtr), SQLITE_TRANSIENT); +} + +#ifdef SQLITE_TEST + +#include +#include + +/* +** Implementation of a special SQL scalar function for testing tokenizers +** designed to be used in concert with the Tcl testing framework. This +** function must be called with two arguments: +** +** SELECT (, ); +** SELECT (, ); +** +** where is the name passed as the second argument +** to the sqlite3Fts3InitHashTable() function (e.g. 'fts3_tokenizer') +** concatenated with the string '_test' (e.g. 'fts3_tokenizer_test'). +** +** The return value is a string that may be interpreted as a Tcl +** list. For each token in the , three elements are +** added to the returned list. The first is the token position, the +** second is the token text (folded, stemmed, etc.) and the third is the +** substring of associated with the token. 
For example, +** using the built-in "simple" tokenizer: +** +** SELECT fts_tokenizer_test('simple', 'I don't see how'); +** +** will return the string: +** +** "{0 i I 1 dont don't 2 see see 3 how how}" +** +*/ +static void testFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + fts3Hash *pHash; + sqlite3_tokenizer_module *p; + sqlite3_tokenizer *pTokenizer = 0; + sqlite3_tokenizer_cursor *pCsr = 0; + + const char *zErr = 0; + + const char *zName; + int nName; + const char *zInput; + int nInput; + + const char *zArg = 0; + + const char *zToken; + int nToken; + int iStart; + int iEnd; + int iPos; + + Tcl_Obj *pRet; + + assert( argc==2 || argc==3 ); + + nName = sqlite3_value_bytes(argv[0]); + zName = (const char *)sqlite3_value_text(argv[0]); + nInput = sqlite3_value_bytes(argv[argc-1]); + zInput = (const char *)sqlite3_value_text(argv[argc-1]); + + if( argc==3 ){ + zArg = (const char *)sqlite3_value_text(argv[1]); + } + + pHash = (fts3Hash *)sqlite3_user_data(context); + p = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash, zName, nName+1); + + if( !p ){ + char *zErr = sqlite3_mprintf("unknown tokenizer: %s", zName); + sqlite3_result_error(context, zErr, -1); + sqlite3_free(zErr); + return; + } + + pRet = Tcl_NewObj(); + Tcl_IncrRefCount(pRet); + + if( SQLITE_OK!=p->xCreate(zArg ? 1 : 0, &zArg, &pTokenizer) ){ + zErr = "error in xCreate()"; + goto finish; + } + pTokenizer->pModule = p; + if( SQLITE_OK!=p->xOpen(pTokenizer, zInput, nInput, &pCsr) ){ + zErr = "error in xOpen()"; + goto finish; + } + pCsr->pTokenizer = pTokenizer; + + while( SQLITE_OK==p->xNext(pCsr, &zToken, &nToken, &iStart, &iEnd, &iPos) ){ + Tcl_ListObjAppendElement(0, pRet, Tcl_NewIntObj(iPos)); + Tcl_ListObjAppendElement(0, pRet, Tcl_NewStringObj(zToken, nToken)); + zToken = &zInput[iStart]; + nToken = iEnd-iStart; + Tcl_ListObjAppendElement(0, pRet, Tcl_NewStringObj(zToken, nToken)); + } + + if( SQLITE_OK!=p->xClose(pCsr) ){ + zErr = "error in xClose()"; + goto finish; + } + if( SQLITE_OK!=p->xDestroy(pTokenizer) ){ + zErr = "error in xDestroy()"; + goto finish; + } + +finish: + if( zErr ){ + sqlite3_result_error(context, zErr, -1); + }else{ + sqlite3_result_text(context, Tcl_GetString(pRet), -1, SQLITE_TRANSIENT); + } + Tcl_DecrRefCount(pRet); +} + +static +int registerTokenizer( + sqlite3 *db, + char *zName, + const sqlite3_tokenizer_module *p +){ + int rc; + sqlite3_stmt *pStmt; + const char zSql[] = "SELECT fts3_tokenizer(?, ?)"; + + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + + sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); + sqlite3_bind_blob(pStmt, 2, &p, sizeof(p), SQLITE_STATIC); + sqlite3_step(pStmt); + + return sqlite3_finalize(pStmt); +} + +static +int queryTokenizer( + sqlite3 *db, + char *zName, + const sqlite3_tokenizer_module **pp +){ + int rc; + sqlite3_stmt *pStmt; + const char zSql[] = "SELECT fts3_tokenizer(?)"; + + *pp = 0; + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + + sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); + if( SQLITE_ROW==sqlite3_step(pStmt) ){ + if( sqlite3_column_type(pStmt, 0)==SQLITE_BLOB ){ + memcpy(pp, sqlite3_column_blob(pStmt, 0), sizeof(*pp)); + } + } + + return sqlite3_finalize(pStmt); +} + +void sqlite3Fts3SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule); + +/* +** Implementation of the scalar function fts3_tokenizer_internal_test(). 
+** This function is used for testing only, it is not included in the +** build unless SQLITE_TEST is defined. +** +** The purpose of this is to test that the fts3_tokenizer() function +** can be used as designed by the C-code in the queryTokenizer and +** registerTokenizer() functions above. These two functions are repeated +** in the README.tokenizer file as an example, so it is important to +** test them. +** +** To run the tests, evaluate the fts3_tokenizer_internal_test() scalar +** function with no arguments. An assert() will fail if a problem is +** detected. i.e.: +** +** SELECT fts3_tokenizer_internal_test(); +** +*/ +static void intTestFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + int rc; + const sqlite3_tokenizer_module *p1; + const sqlite3_tokenizer_module *p2; + sqlite3 *db = (sqlite3 *)sqlite3_user_data(context); + + /* Test the query function */ + sqlite3Fts3SimpleTokenizerModule(&p1); + rc = queryTokenizer(db, "simple", &p2); + assert( rc==SQLITE_OK ); + assert( p1==p2 ); + rc = queryTokenizer(db, "nosuchtokenizer", &p2); + assert( rc==SQLITE_ERROR ); + assert( p2==0 ); + assert( 0==strcmp(sqlite3_errmsg(db), "unknown tokenizer: nosuchtokenizer") ); + + /* Test the storage function */ + rc = registerTokenizer(db, "nosuchtokenizer", p1); + assert( rc==SQLITE_OK ); + rc = queryTokenizer(db, "nosuchtokenizer", &p2); + assert( rc==SQLITE_OK ); + assert( p2==p1 ); + + sqlite3_result_text(context, "ok", -1, SQLITE_STATIC); +} + +#endif + +/* +** Set up SQL objects in database db used to access the contents of +** the hash table pointed to by argument pHash. The hash table must +** been initialised to use string keys, and to take a private copy +** of the key when a value is inserted. i.e. by a call similar to: +** +** sqlite3Fts3HashInit(pHash, FTS3_HASH_STRING, 1); +** +** This function adds a scalar function (see header comment above +** scalarFunc() in this file for details) and, if ENABLE_TABLE is +** defined at compilation time, a temporary virtual table (see header +** comment above struct HashTableVtab) to the database schema. Both +** provide read/write access to the contents of *pHash. +** +** The third argument to this function, zName, is used as the name +** of both the scalar and, if created, the virtual table. 
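+**
+** A fuller call sequence, sketched informally (allocation checks and
+** error handling omitted):
+**
+**   fts3Hash *pHash = sqlite3_malloc(sizeof(fts3Hash));
+**   sqlite3Fts3HashInit(pHash, FTS3_HASH_STRING, 1);
+**   int rc = sqlite3Fts3InitHashTable(db, pHash, "fts3_tokenizer");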
+*/ +int sqlite3Fts3InitHashTable( + sqlite3 *db, + fts3Hash *pHash, + const char *zName +){ + int rc = SQLITE_OK; + void *p = (void *)pHash; + const int any = SQLITE_ANY; + char *zTest = 0; + char *zTest2 = 0; + +#ifdef SQLITE_TEST + void *pdb = (void *)db; + zTest = sqlite3_mprintf("%s_test", zName); + zTest2 = sqlite3_mprintf("%s_internal_test", zName); + if( !zTest || !zTest2 ){ + rc = SQLITE_NOMEM; + } +#endif + + if( rc!=SQLITE_OK + || (rc = sqlite3_create_function(db, zName, 1, any, p, scalarFunc, 0, 0)) + || (rc = sqlite3_create_function(db, zName, 2, any, p, scalarFunc, 0, 0)) +#ifdef SQLITE_TEST + || (rc = sqlite3_create_function(db, zTest, 2, any, p, testFunc, 0, 0)) + || (rc = sqlite3_create_function(db, zTest, 3, any, p, testFunc, 0, 0)) + || (rc = sqlite3_create_function(db, zTest2, 0, any, pdb, intTestFunc, 0, 0)) +#endif + ); + + sqlite3_free(zTest); + sqlite3_free(zTest2); + return rc; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3_tokenizer.h b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3_tokenizer.h new file mode 100644 index 0000000000..4faef56f24 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3_tokenizer.h @@ -0,0 +1,145 @@ +/* +** 2006 July 10 +** +** The author disclaims copyright to this source code. +** +************************************************************************* +** Defines the interface to tokenizers used by fulltext-search. There +** are three basic components: +** +** sqlite3_tokenizer_module is a singleton defining the tokenizer +** interface functions. This is essentially the class structure for +** tokenizers. +** +** sqlite3_tokenizer is used to define a particular tokenizer, perhaps +** including customization information defined at creation time. +** +** sqlite3_tokenizer_cursor is generated by a tokenizer to generate +** tokens from a particular input. +*/ +#ifndef _FTS3_TOKENIZER_H_ +#define _FTS3_TOKENIZER_H_ + +/* TODO(shess) Only used for SQLITE_OK and SQLITE_DONE at this time. +** If tokenizers are to be allowed to call sqlite3_*() functions, then +** we will need a way to register the API consistently. +*/ +#include "sqlite3.h" + +/* +** Structures used by the tokenizer interface. When a new tokenizer +** implementation is registered, the caller provides a pointer to +** an sqlite3_tokenizer_module containing pointers to the callback +** functions that make up an implementation. +** +** When an fts3 table is created, it passes any arguments passed to +** the tokenizer clause of the CREATE VIRTUAL TABLE statement to the +** sqlite3_tokenizer_module.xCreate() function of the requested tokenizer +** implementation. The xCreate() function in turn returns an +** sqlite3_tokenizer structure representing the specific tokenizer to +** be used for the fts3 table (customized by the tokenizer clause arguments). +** +** To tokenize an input buffer, the sqlite3_tokenizer_module.xOpen() +** method is called. It returns an sqlite3_tokenizer_cursor object +** that may be used to tokenize a specific input buffer based on +** the tokenization rules supplied by a specific sqlite3_tokenizer +** object. +*/ +typedef struct sqlite3_tokenizer_module sqlite3_tokenizer_module; +typedef struct sqlite3_tokenizer sqlite3_tokenizer; +typedef struct sqlite3_tokenizer_cursor sqlite3_tokenizer_cursor; + +struct sqlite3_tokenizer_module { + + /* + ** Structure version. Should always be set to 0. + */ + int iVersion; + + /* + ** Create a new tokenizer. 
The values in the argv[] array are the + ** arguments passed to the "tokenizer" clause of the CREATE VIRTUAL + ** TABLE statement that created the fts3 table. For example, if + ** the following SQL is executed: + ** + ** CREATE .. USING fts3( ... , tokenizer arg1 arg2) + ** + ** then argc is set to 2, and the argv[] array contains pointers + ** to the strings "arg1" and "arg2". + ** + ** This method should return either SQLITE_OK (0), or an SQLite error + ** code. If SQLITE_OK is returned, then *ppTokenizer should be set + ** to point at the newly created tokenizer structure. The generic + ** sqlite3_tokenizer.pModule variable should not be initialised by + ** this callback. The caller will do so. + */ + int (*xCreate)( + int argc, /* Size of argv array */ + const char *const*argv, /* Tokenizer argument strings */ + sqlite3_tokenizer **ppTokenizer /* OUT: Created tokenizer */ + ); + + /* + ** Destroy an existing tokenizer. The fts3 module calls this method + ** exactly once for each successful call to xCreate(). + */ + int (*xDestroy)(sqlite3_tokenizer *pTokenizer); + + /* + ** Create a tokenizer cursor to tokenize an input buffer. The caller + ** is responsible for ensuring that the input buffer remains valid + ** until the cursor is closed (using the xClose() method). + */ + int (*xOpen)( + sqlite3_tokenizer *pTokenizer, /* Tokenizer object */ + const char *pInput, int nBytes, /* Input buffer */ + sqlite3_tokenizer_cursor **ppCursor /* OUT: Created tokenizer cursor */ + ); + + /* + ** Destroy an existing tokenizer cursor. The fts3 module calls this + ** method exactly once for each successful call to xOpen(). + */ + int (*xClose)(sqlite3_tokenizer_cursor *pCursor); + + /* + ** Retrieve the next token from the tokenizer cursor pCursor. This + ** method should either return SQLITE_OK and set the values of the + ** "OUT" variables identified below, or SQLITE_DONE to indicate that + ** the end of the buffer has been reached, or an SQLite error code. + ** + ** *ppToken should be set to point at a buffer containing the + ** normalized version of the token (i.e. after any case-folding and/or + ** stemming has been performed). *pnBytes should be set to the length + ** of this buffer in bytes. The input text that generated the token is + ** identified by the byte offsets returned in *piStartOffset and + ** *piEndOffset. + ** + ** The buffer *ppToken is set to point at is managed by the tokenizer + ** implementation. It is only required to be valid until the next call + ** to xNext() or xClose(). + */ + /* TODO(shess) current implementation requires pInput to be + ** nul-terminated. This should either be fixed, or pInput/nBytes + ** should be converted to zInput. + */ + int (*xNext)( + sqlite3_tokenizer_cursor *pCursor, /* Tokenizer cursor */ + const char **ppToken, int *pnBytes, /* OUT: Normalized text for token */ + int *piStartOffset, /* OUT: Byte offset of token in input buffer */ + int *piEndOffset, /* OUT: Byte offset of end of token in input buffer */ + int *piPosition /* OUT: Number of tokens returned before this one */ + ); +}; + +struct sqlite3_tokenizer { + const sqlite3_tokenizer_module *pModule; /* The module for this tokenizer */ + /* Tokenizer implementations will typically add additional fields */ +}; + +struct sqlite3_tokenizer_cursor { + sqlite3_tokenizer *pTokenizer; /* Tokenizer for this cursor. 
*/ + /* Tokenizer implementations will typically add additional fields */ +}; + +#endif /* _FTS3_TOKENIZER_H_ */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3_tokenizer1.c b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3_tokenizer1.c new file mode 100644 index 0000000000..f53cc1dd46 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/fts3_tokenizer1.c @@ -0,0 +1,229 @@ +/* +** 2006 Oct 10 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** Implementation of the "simple" full-text-search tokenizer. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS3 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS3 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). +*/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) + + +#include +#include +#include +#include +#include + +#include "fts3_tokenizer.h" + +typedef struct simple_tokenizer { + sqlite3_tokenizer base; + char delim[128]; /* flag ASCII delimiters */ +} simple_tokenizer; + +typedef struct simple_tokenizer_cursor { + sqlite3_tokenizer_cursor base; + const char *pInput; /* input we are tokenizing */ + int nBytes; /* size of the input */ + int iOffset; /* current position in pInput */ + int iToken; /* index of next token to be returned */ + char *pToken; /* storage for current token */ + int nTokenAllocated; /* space allocated to zToken buffer */ +} simple_tokenizer_cursor; + + +/* Forward declaration */ +static const sqlite3_tokenizer_module simpleTokenizerModule; + +static int simpleDelim(simple_tokenizer *t, unsigned char c){ + return c<0x80 && t->delim[c]; +} + +/* +** Create a new tokenizer instance. +*/ +static int simpleCreate( + int argc, const char * const *argv, + sqlite3_tokenizer **ppTokenizer +){ + simple_tokenizer *t; + + t = (simple_tokenizer *) calloc(sizeof(*t), 1); + if( t==NULL ) return SQLITE_NOMEM; + + /* TODO(shess) Delimiters need to remain the same from run to run, + ** else we need to reindex. One solution would be a meta-table to + ** track such information in the database, then we'd only want this + ** information on the initial create. + */ + if( argc>1 ){ + int i, n = strlen(argv[1]); + for(i=0; i=0x80 ){ + free(t); + return SQLITE_ERROR; + } + t->delim[ch] = 1; + } + } else { + /* Mark non-alphanumeric ASCII characters as delimiters */ + int i; + for(i=1; i<0x80; i++){ + t->delim[i] = !isalnum(i); + } + } + + *ppTokenizer = &t->base; + return SQLITE_OK; +} + +/* +** Destroy a tokenizer +*/ +static int simpleDestroy(sqlite3_tokenizer *pTokenizer){ + free(pTokenizer); + return SQLITE_OK; +} + +/* +** Prepare to begin tokenizing a particular string. The input +** string to be tokenized is pInput[0..nBytes-1]. A cursor +** used to incrementally tokenize this string is returned in +** *ppCursor. 
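+**
+** A caller typically drives the returned cursor as in the following
+** informal sketch (error handling omitted; a negative byte count means
+** the input is nul-terminated, and note that the caller, not xOpen(),
+** fills in pCsr->pTokenizer):
+**
+**   sqlite3_tokenizer_cursor *pCsr;
+**   const char *zTok; int nTok, iStart, iEnd, iPos;
+**   pModule->xOpen(pTok, zText, -1, &pCsr);
+**   pCsr->pTokenizer = pTok;
+**   while( SQLITE_OK==pModule->xNext(pCsr, &zTok, &nTok,
+**                                    &iStart, &iEnd, &iPos) ){
+**     ... zTok[0..nTok-1] is the token, zText[iStart..iEnd-1] its source ...
+**   }
+**   pModule->xClose(pCsr);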
+*/ +static int simpleOpen( + sqlite3_tokenizer *pTokenizer, /* The tokenizer */ + const char *pInput, int nBytes, /* String to be tokenized */ + sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ +){ + simple_tokenizer_cursor *c; + + c = (simple_tokenizer_cursor *) malloc(sizeof(*c)); + if( c==NULL ) return SQLITE_NOMEM; + + c->pInput = pInput; + if( pInput==0 ){ + c->nBytes = 0; + }else if( nBytes<0 ){ + c->nBytes = (int)strlen(pInput); + }else{ + c->nBytes = nBytes; + } + c->iOffset = 0; /* start tokenizing at the beginning */ + c->iToken = 0; + c->pToken = NULL; /* no space allocated, yet. */ + c->nTokenAllocated = 0; + + *ppCursor = &c->base; + return SQLITE_OK; +} + +/* +** Close a tokenization cursor previously opened by a call to +** simpleOpen() above. +*/ +static int simpleClose(sqlite3_tokenizer_cursor *pCursor){ + simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; + free(c->pToken); + free(c); + return SQLITE_OK; +} + +/* +** Extract the next token from a tokenization cursor. The cursor must +** have been opened by a prior call to simpleOpen(). +*/ +static int simpleNext( + sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by simpleOpen */ + const char **ppToken, /* OUT: *ppToken is the token text */ + int *pnBytes, /* OUT: Number of bytes in token */ + int *piStartOffset, /* OUT: Starting offset of token */ + int *piEndOffset, /* OUT: Ending offset of token */ + int *piPosition /* OUT: Position integer of token */ +){ + simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; + simple_tokenizer *t = (simple_tokenizer *) pCursor->pTokenizer; + unsigned char *p = (unsigned char *)c->pInput; + + while( c->iOffsetnBytes ){ + int iStartOffset; + + /* Scan past delimiter characters */ + while( c->iOffsetnBytes && simpleDelim(t, p[c->iOffset]) ){ + c->iOffset++; + } + + /* Count non-delimiter characters. */ + iStartOffset = c->iOffset; + while( c->iOffsetnBytes && !simpleDelim(t, p[c->iOffset]) ){ + c->iOffset++; + } + + if( c->iOffset>iStartOffset ){ + int i, n = c->iOffset-iStartOffset; + if( n>c->nTokenAllocated ){ + c->nTokenAllocated = n+20; + c->pToken = realloc(c->pToken, c->nTokenAllocated); + if( c->pToken==NULL ) return SQLITE_NOMEM; + } + for(i=0; ipToken[i] = ch<0x80 ? tolower(ch) : ch; + } + *ppToken = c->pToken; + *pnBytes = n; + *piStartOffset = iStartOffset; + *piEndOffset = c->iOffset; + *piPosition = c->iToken++; + + return SQLITE_OK; + } + } + return SQLITE_DONE; +} + +/* +** The set of routines that implement the simple tokenizer +*/ +static const sqlite3_tokenizer_module simpleTokenizerModule = { + 0, + simpleCreate, + simpleDestroy, + simpleOpen, + simpleClose, + simpleNext, +}; + +/* +** Allocate a new simple tokenizer. Return a pointer to the new +** tokenizer in *ppModule +*/ +void sqlite3Fts3SimpleTokenizerModule( + sqlite3_tokenizer_module const**ppModule +){ + *ppModule = &simpleTokenizerModule; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/mkfts3amal.tcl b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/mkfts3amal.tcl new file mode 100644 index 0000000000..cfea5d2ab0 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/fts3/mkfts3amal.tcl @@ -0,0 +1,116 @@ +#!/usr/bin/tclsh +# +# This script builds a single C code file holding all of FTS3 code. +# The name of the output file is fts3amal.c. 
To build this file, +# first do: +# +# make target_source +# +# The make target above moves all of the source code files into +# a subdirectory named "tsrc". (This script expects to find the files +# there and will not work if they are not found.) +# +# After the "tsrc" directory has been created and populated, run +# this script: +# +# tclsh mkfts3amal.tcl +# +# The amalgamated FTS3 code will be written into fts3amal.c +# + +# Open the output file and write a header comment at the beginning +# of the file. +# +set out [open fts3amal.c w] +set today [clock format [clock seconds] -format "%Y-%m-%d %H:%M:%S UTC" -gmt 1] +puts $out [subst \ +{/****************************************************************************** +** This file is an amalgamation of separate C source files from the SQLite +** Full Text Search extension 2 (fts3). By combining all the individual C +** code files into this single large file, the entire code can be compiled +** as a one translation unit. This allows many compilers to do optimizations +** that would not be possible if the files were compiled separately. It also +** makes the code easier to import into other projects. +** +** This amalgamation was generated on $today. +*/}] + +# These are the header files used by FTS3. The first time any of these +# files are seen in a #include statement in the C code, include the complete +# text of the file in-line. The file only needs to be included once. +# +foreach hdr { + fts3.h + fts3_hash.h + fts3_tokenizer.h + sqlite3.h + sqlite3ext.h +} { + set available_hdr($hdr) 1 +} + +# 78 stars used for comment formatting. +set s78 \ +{*****************************************************************************} + +# Insert a comment into the code +# +proc section_comment {text} { + global out s78 + set n [string length $text] + set nstar [expr {60 - $n}] + set stars [string range $s78 0 $nstar] + puts $out "/************** $text $stars/" +} + +# Read the source file named $filename and write it into the +# sqlite3.c output file. If any #include statements are seen, +# process them approprately. +# +proc copy_file {filename} { + global seen_hdr available_hdr out + set tail [file tail $filename] + section_comment "Begin file $tail" + set in [open $filename r] + while {![eof $in]} { + set line [gets $in] + if {[regexp {^#\s*include\s+["<]([^">]+)[">]} $line all hdr]} { + if {[info exists available_hdr($hdr)]} { + if {$available_hdr($hdr)} { + section_comment "Include $hdr in the middle of $tail" + copy_file tsrc/$hdr + section_comment "Continuing where we left off in $tail" + } + } elseif {![info exists seen_hdr($hdr)]} { + set seen_hdr($hdr) 1 + puts $out $line + } + } elseif {[regexp {^#ifdef __cplusplus} $line]} { + puts $out "#if 0" + } elseif {[regexp {^#line} $line]} { + # Skip #line directives. + } else { + puts $out $line + } + } + close $in + section_comment "End of $tail" +} + + +# Process the source files. Process files containing commonly +# used subroutines first in order to help the compiler find +# inlining opportunities. 
+# +foreach file { + fts3.c + fts3_hash.c + fts3_porter.c + fts3_tokenizer.c + fts3_tokenizer1.c + fts3_icu.c +} { + copy_file tsrc/$file +} + +close $out diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/icu/README.txt b/libraries/sqlite/unix/sqlite-3.5.1/ext/icu/README.txt new file mode 100644 index 0000000000..5c995ccb4a --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/icu/README.txt @@ -0,0 +1,170 @@ + +This directory contains source code for the SQLite "ICU" extension, an +integration of the "International Components for Unicode" library with +SQLite. Documentation follows. + + 1. Features + + 1.1 SQL Scalars upper() and lower() + 1.2 Unicode Aware LIKE Operator + 1.3 ICU Collation Sequences + 1.4 SQL REGEXP Operator + + 2. Compilation and Usage + + 3. Bugs, Problems and Security Issues + + 3.1 The "case_sensitive_like" Pragma + 3.2 The SQLITE_MAX_LIKE_PATTERN_LENGTH Macro + 3.3 Collation Sequence Security Issue + + +1. FEATURES + + 1.1 SQL Scalars upper() and lower() + + SQLite's built-in implementations of these two functions only + provide case mapping for the 26 letters used in the English + language. The ICU based functions provided by this extension + provide case mapping, where defined, for the full range of + unicode characters. + + ICU provides two types of case mapping, "general" case mapping and + "language specific". Refer to ICU documentation for the differences + between the two. Specifically: + + http://www.icu-project.org/userguide/caseMappings.html + http://www.icu-project.org/userguide/posix.html#case_mappings + + To utilise "general" case mapping, the upper() or lower() scalar + functions are invoked with one argument: + + upper('ABC') -> 'abc' + lower('abc') -> 'ABC' + + To access ICU "language specific" case mapping, upper() or lower() + should be invoked with two arguments. The second argument is the name + of the locale to use. Passing an empty string ("") or SQL NULL value + as the second argument is the same as invoking the 1 argument version + of upper() or lower(): + + lower('I', 'en_us') -> 'i' + lower('I', 'tr_tr') -> 'ı' (small dotless i) + + 1.2 Unicode Aware LIKE Operator + + Similarly to the upper() and lower() functions, the built-in SQLite LIKE + operator understands case equivalence for the 26 letters of the English + language alphabet. The implementation of LIKE included in this + extension uses the ICU function u_foldCase() to provide case + independent comparisons for the full range of unicode characters. + + The U_FOLD_CASE_DEFAULT flag is passed to u_foldCase(), meaning the + dotless 'I' character used in the Turkish language is considered + to be in the same equivalence class as the dotted 'I' character + used by many languages (including English). + + 1.3 ICU Collation Sequences + + A special SQL scalar function, icu_load_collation() is provided that + may be used to register ICU collation sequences with SQLite. It + is always called with exactly two arguments, the ICU locale + identifying the collation sequence to ICU, and the name of the + SQLite collation sequence to create. 
For example, to create an + SQLite collation sequence named "turkish" using Turkish language + sorting rules, the SQL statement: + + SELECT icu_load_collation('tr_TR', 'turkish'); + + Or, for Australian English: + + SELECT icu_load_collation('en_AU', 'australian'); + + The identifiers "turkish" and "australian" may then be used + as collation sequence identifiers in SQL statements: + + CREATE TABLE aust_turkish_penpals( + australian_penpal_name TEXT COLLATE australian, + turkish_penpal_name TEXT COLLATE turkish + ); + + 1.4 SQL REGEXP Operator + + This extension provides an implementation of the SQL binary + comparision operator "REGEXP", based on the regular expression functions + provided by the ICU library. The syntax of the operator is as described + in SQLite documentation: + + REGEXP + + This extension uses the ICU defaults for regular expression matching + behaviour. Specifically, this means that: + + * Matching is case-sensitive, + * Regular expression comments are not allowed within patterns, and + * The '^' and '$' characters match the beginning and end of the + argument, not the beginning and end of lines within + the argument. + + Even more specifically, the value passed to the "flags" parameter + of ICU C function uregex_open() is 0. + + +2 COMPILATION AND USAGE + + The easiest way to compile and use the ICU extension is to build + and use it as a dynamically loadable SQLite extension. To do this + using gcc on *nix: + + gcc -shared icu.c `icu-config --ldflags` -o libSqliteIcu.so + + You may need to add "-I" flags so that gcc can find sqlite3ext.h + and sqlite3.h. The resulting shared lib, libSqliteIcu.so, may be + loaded into sqlite in the same way as any other dynamically loadable + extension. + + +3 BUGS, PROBLEMS AND SECURITY ISSUES + + 3.1 The "case_sensitive_like" Pragma + + This extension does not work well with the "case_sensitive_like" + pragma. If this pragma is used before the ICU extension is loaded, + then the pragma has no effect. If the pragma is used after the ICU + extension is loaded, then SQLite ignores the ICU implementation and + always uses the built-in LIKE operator. + + The ICU extension LIKE operator is always case insensitive. + + 3.2 The SQLITE_MAX_LIKE_PATTERN_LENGTH Macro + + Passing very long patterns to the built-in SQLite LIKE operator can + cause a stack overflow. To curb this problem, SQLite defines the + SQLITE_MAX_LIKE_PATTERN_LENGTH macro as the maximum length of a + pattern in bytes (irrespective of encoding). The default value is + defined in internal header file "limits.h". + + The ICU extension LIKE implementation suffers from the same + problem and uses the same solution. However, since the ICU extension + code does not include the SQLite file "limits.h", modifying + the default value therein does not affect the ICU extension. + The default value of SQLITE_MAX_LIKE_PATTERN_LENGTH used by + the ICU extension LIKE operator is 50000, defined in source + file "icu.c". + + 3.3 Collation Sequence Security Issue + + Internally, SQLite assumes that indices stored in database files + are sorted according to the collation sequence indicated by the + SQL schema. Changing the definition of a collation sequence after + an index has been built is therefore equivalent to database + corruption. The SQLite library is not very well tested under + these conditions, and may contain potential buffer overruns + or other programming errors that could be exploited by a malicious + programmer. 
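+
+  The recommendation below can be implemented with the SQLite
+  authorisation callback. The following is an informal sketch only (it
+  assumes the SQLITE_FUNCTION authorizer code provided by the SQLite
+  version in use; "authCb" is an illustrative name):
+
+    #include <string.h>
+    #include "sqlite3.h"
+
+    static int authCb(void *pArg, int op, const char *z1, const char *z2,
+                      const char *z3, const char *z4){
+      /* For SQLITE_FUNCTION, the function name is passed in z2 */
+      if( op==SQLITE_FUNCTION && z2 && 0==strcmp(z2, "icu_load_collation") ){
+        return SQLITE_DENY;
+      }
+      return SQLITE_OK;
+    }
+
+    /* After opening the database connection db: */
+    sqlite3_set_authorizer(db, authCb, 0);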
+ + If the ICU extension is used in an environment where potentially + malicious users may execute arbitrary SQL (i.e. gears), they + should be prevented from invoking the icu_load_collation() function, + possibly using the authorisation callback. + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ext/icu/icu.c b/libraries/sqlite/unix/sqlite-3.5.1/ext/icu/icu.c new file mode 100644 index 0000000000..11bb116d8d --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ext/icu/icu.c @@ -0,0 +1,499 @@ +/* +** 2007 May 6 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** $Id: icu.c,v 1.6 2007/06/22 15:21:16 danielk1977 Exp $ +** +** This file implements an integration between the ICU library +** ("International Components for Unicode", an open-source library +** for handling unicode data) and SQLite. The integration uses +** ICU to provide the following to SQLite: +** +** * An implementation of the SQL regexp() function (and hence REGEXP +** operator) using the ICU uregex_XX() APIs. +** +** * Implementations of the SQL scalar upper() and lower() functions +** for case mapping. +** +** * Integration of ICU and SQLite collation seqences. +** +** * An implementation of the LIKE operator that uses ICU to +** provide case-independent matching. +*/ + +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_ICU) + +/* Include ICU headers */ +#include +#include +#include +#include + +#include + +#ifndef SQLITE_CORE + #include "sqlite3ext.h" + SQLITE_EXTENSION_INIT1 +#else + #include "sqlite3.h" +#endif + +/* +** Maximum length (in bytes) of the pattern in a LIKE or GLOB +** operator. +*/ +#ifndef SQLITE_MAX_LIKE_PATTERN_LENGTH +# define SQLITE_MAX_LIKE_PATTERN_LENGTH 50000 +#endif + +/* +** Version of sqlite3_free() that is always a function, never a macro. +*/ +static void xFree(void *p){ + sqlite3_free(p); +} + +/* +** Compare two UTF-8 strings for equality where the first string is +** a "LIKE" expression. Return true (1) if they are the same and +** false (0) if they are different. +*/ +static int icuLikeCompare( + const uint8_t *zPattern, /* LIKE pattern */ + const uint8_t *zString, /* The UTF-8 string to compare against */ + const UChar32 uEsc /* The escape character */ +){ + static const int MATCH_ONE = (UChar32)'_'; + static const int MATCH_ALL = (UChar32)'%'; + + int iPattern = 0; /* Current byte index in zPattern */ + int iString = 0; /* Current byte index in zString */ + + int prevEscape = 0; /* True if the previous character was uEsc */ + + while( zPattern[iPattern]!=0 ){ + + /* Read (and consume) the next character from the input pattern. */ + UChar32 uPattern; + U8_NEXT_UNSAFE(zPattern, iPattern, uPattern); + assert(uPattern!=0); + + /* There are now 4 possibilities: + ** + ** 1. uPattern is an unescaped match-all character "%", + ** 2. uPattern is an unescaped match-one character "_", + ** 3. uPattern is an unescaped escape character, or + ** 4. uPattern is to be handled as an ordinary character + */ + if( !prevEscape && uPattern==MATCH_ALL ){ + /* Case 1. */ + uint8_t c; + + /* Skip any MATCH_ALL or MATCH_ONE characters that follow a + ** MATCH_ALL. For each MATCH_ONE, skip one character in the + ** test string. 
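+      **
+      ** For example, with the pattern "a%%_c" the two '%' collapse into
+      ** one, the '_' consumes exactly one character of the test string,
+      ** and the recursive scan below then looks for a match of "c".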
+ */ + while( (c=zPattern[iPattern]) == MATCH_ALL || c == MATCH_ONE ){ + if( c==MATCH_ONE ){ + if( zString[iString]==0 ) return 0; + U8_FWD_1_UNSAFE(zString, iString); + } + iPattern++; + } + + if( zPattern[iPattern]==0 ) return 1; + + while( zString[iString] ){ + if( icuLikeCompare(&zPattern[iPattern], &zString[iString], uEsc) ){ + return 1; + } + U8_FWD_1_UNSAFE(zString, iString); + } + return 0; + + }else if( !prevEscape && uPattern==MATCH_ONE ){ + /* Case 2. */ + if( zString[iString]==0 ) return 0; + U8_FWD_1_UNSAFE(zString, iString); + + }else if( !prevEscape && uPattern==uEsc){ + /* Case 3. */ + prevEscape = 1; + + }else{ + /* Case 4. */ + UChar32 uString; + U8_NEXT_UNSAFE(zString, iString, uString); + uString = u_foldCase(uString, U_FOLD_CASE_DEFAULT); + uPattern = u_foldCase(uPattern, U_FOLD_CASE_DEFAULT); + if( uString!=uPattern ){ + return 0; + } + prevEscape = 0; + } + } + + return zString[iString]==0; +} + +/* +** Implementation of the like() SQL function. This function implements +** the build-in LIKE operator. The first argument to the function is the +** pattern and the second argument is the string. So, the SQL statements: +** +** A LIKE B +** +** is implemented as like(B, A). If there is an escape character E, +** +** A LIKE B ESCAPE E +** +** is mapped to like(B, A, E). +*/ +static void icuLikeFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + const unsigned char *zA = sqlite3_value_text(argv[0]); + const unsigned char *zB = sqlite3_value_text(argv[1]); + UChar32 uEsc = 0; + + /* Limit the length of the LIKE or GLOB pattern to avoid problems + ** of deep recursion and N*N behavior in patternCompare(). + */ + if( sqlite3_value_bytes(argv[0])>SQLITE_MAX_LIKE_PATTERN_LENGTH ){ + sqlite3_result_error(context, "LIKE or GLOB pattern too complex", -1); + return; + } + + + if( argc==3 ){ + /* The escape character string must consist of a single UTF-8 character. + ** Otherwise, return an error. + */ + int nE= sqlite3_value_bytes(argv[2]); + const unsigned char *zE = sqlite3_value_text(argv[2]); + int i = 0; + if( zE==0 ) return; + U8_NEXT(zE, i, nE, uEsc); + if( i!=nE){ + sqlite3_result_error(context, + "ESCAPE expression must be a single character", -1); + return; + } + } + + if( zA && zB ){ + sqlite3_result_int(context, icuLikeCompare(zA, zB, uEsc)); + } +} + +/* +** This function is called when an ICU function called from within +** the implementation of an SQL scalar function returns an error. +** +** The scalar function context passed as the first argument is +** loaded with an error message based on the following two args. +*/ +static void icuFunctionError( + sqlite3_context *pCtx, /* SQLite scalar function context */ + const char *zName, /* Name of ICU function that failed */ + UErrorCode e /* Error code returned by ICU function */ +){ + char zBuf[128]; + sqlite3_snprintf(128, zBuf, "ICU error: %s(): %s", zName, u_errorName(e)); + zBuf[127] = '\0'; + sqlite3_result_error(pCtx, zBuf, -1); +} + +/* +** Function to delete compiled regexp objects. Registered as +** a destructor function with sqlite3_set_auxdata(). +*/ +static void icuRegexpDelete(void *p){ + URegularExpression *pExpr = (URegularExpression *)p; + uregex_close(pExpr); +} + +/* +** Implementation of SQLite REGEXP operator. This scalar function takes +** two arguments. The first is a regular expression pattern to compile +** the second is a string to match against that pattern. If either +** argument is an SQL NULL, then NULL Is returned. 
Otherwise, the result +** is 1 if the string matches the pattern, or 0 otherwise. +** +** SQLite maps the regexp() function to the regexp() operator such +** that the following two are equivalent: +** +** zString REGEXP zPattern +** regexp(zPattern, zString) +** +** Uses the following ICU regexp APIs: +** +** uregex_open() +** uregex_matches() +** uregex_close() +*/ +static void icuRegexpFunc(sqlite3_context *p, int nArg, sqlite3_value **apArg){ + UErrorCode status = U_ZERO_ERROR; + URegularExpression *pExpr; + UBool res; + const UChar *zString = sqlite3_value_text16(apArg[1]); + + /* If the left hand side of the regexp operator is NULL, + ** then the result is also NULL. + */ + if( !zString ){ + return; + } + + pExpr = sqlite3_get_auxdata(p, 0); + if( !pExpr ){ + const UChar *zPattern = sqlite3_value_text16(apArg[0]); + if( !zPattern ){ + return; + } + pExpr = uregex_open(zPattern, -1, 0, 0, &status); + + if( U_SUCCESS(status) ){ + sqlite3_set_auxdata(p, 0, pExpr, icuRegexpDelete); + }else{ + assert(!pExpr); + icuFunctionError(p, "uregex_open", status); + return; + } + } + + /* Configure the text that the regular expression operates on. */ + uregex_setText(pExpr, zString, -1, &status); + if( !U_SUCCESS(status) ){ + icuFunctionError(p, "uregex_setText", status); + return; + } + + /* Attempt the match */ + res = uregex_matches(pExpr, 0, &status); + if( !U_SUCCESS(status) ){ + icuFunctionError(p, "uregex_matches", status); + return; + } + + /* Set the text that the regular expression operates on to a NULL + ** pointer. This is not really necessary, but it is tidier than + ** leaving the regular expression object configured with an invalid + ** pointer after this function returns. + */ + uregex_setText(pExpr, 0, 0, &status); + + /* Return 1 or 0. */ + sqlite3_result_int(p, res ? 1 : 0); +} + +/* +** Implementations of scalar functions for case mapping - upper() and +** lower(). Function upper() converts it's input to upper-case (ABC). +** Function lower() converts to lower-case (abc). +** +** ICU provides two types of case mapping, "general" case mapping and +** "language specific". Refer to ICU documentation for the differences +** between the two. +** +** To utilise "general" case mapping, the upper() or lower() scalar +** functions are invoked with one argument: +** +** upper('ABC') -> 'abc' +** lower('abc') -> 'ABC' +** +** To access ICU "language specific" case mapping, upper() or lower() +** should be invoked with two arguments. The second argument is the name +** of the locale to use. Passing an empty string ("") or SQL NULL value +** as the second argument is the same as invoking the 1 argument version +** of upper() or lower(). 
+** +** lower('I', 'en_us') -> 'i' +** lower('I', 'tr_tr') -> 'ı' (small dotless i) +** +** http://www.icu-project.org/userguide/posix.html#case_mappings +*/ +static void icuCaseFunc16(sqlite3_context *p, int nArg, sqlite3_value **apArg){ + const UChar *zInput; + UChar *zOutput; + int nInput; + int nOutput; + + UErrorCode status = U_ZERO_ERROR; + const char *zLocale = 0; + + assert(nArg==1 || nArg==2); + if( nArg==2 ){ + zLocale = (const char *)sqlite3_value_text(apArg[1]); + } + + zInput = sqlite3_value_text16(apArg[0]); + if( !zInput ){ + return; + } + nInput = sqlite3_value_bytes16(apArg[0]); + + nOutput = nInput * 2 + 2; + zOutput = sqlite3_malloc(nOutput); + if( !zOutput ){ + return; + } + + if( sqlite3_user_data(p) ){ + u_strToUpper(zOutput, nOutput/2, zInput, nInput/2, zLocale, &status); + }else{ + u_strToLower(zOutput, nOutput/2, zInput, nInput/2, zLocale, &status); + } + + if( !U_SUCCESS(status) ){ + icuFunctionError(p, "u_strToLower()/u_strToUpper", status); + return; + } + + sqlite3_result_text16(p, zOutput, -1, xFree); +} + +/* +** Collation sequence destructor function. The pCtx argument points to +** a UCollator structure previously allocated using ucol_open(). +*/ +static void icuCollationDel(void *pCtx){ + UCollator *p = (UCollator *)pCtx; + ucol_close(p); +} + +/* +** Collation sequence comparison function. The pCtx argument points to +** a UCollator structure previously allocated using ucol_open(). +*/ +static int icuCollationColl( + void *pCtx, + int nLeft, + const void *zLeft, + int nRight, + const void *zRight +){ + UCollationResult res; + UCollator *p = (UCollator *)pCtx; + res = ucol_strcoll(p, (UChar *)zLeft, nLeft/2, (UChar *)zRight, nRight/2); + switch( res ){ + case UCOL_LESS: return -1; + case UCOL_GREATER: return +1; + case UCOL_EQUAL: return 0; + } + assert(!"Unexpected return value from ucol_strcoll()"); + return 0; +} + +/* +** Implementation of the scalar function icu_load_collation(). +** +** This scalar function is used to add ICU collation based collation +** types to an SQLite database connection. It is intended to be called +** as follows: +** +** SELECT icu_load_collation(, ); +** +** Where is a string containing an ICU locale identifier (i.e. +** "en_AU", "tr_TR" etc.) and is the name of the +** collation sequence to create. +*/ +static void icuLoadCollation( + sqlite3_context *p, + int nArg, + sqlite3_value **apArg +){ + sqlite3 *db = (sqlite3 *)sqlite3_user_data(p); + UErrorCode status = U_ZERO_ERROR; + const char *zLocale; /* Locale identifier - (eg. "jp_JP") */ + const char *zName; /* SQL Collation sequence name (eg. "japanese") */ + UCollator *pUCollator; /* ICU library collation object */ + int rc; /* Return code from sqlite3_create_collation_x() */ + + assert(nArg==2); + zLocale = (const char *)sqlite3_value_text(apArg[0]); + zName = (const char *)sqlite3_value_text(apArg[1]); + + if( !zLocale || !zName ){ + return; + } + + pUCollator = ucol_open(zLocale, &status); + if( !U_SUCCESS(status) ){ + icuFunctionError(p, "ucol_open", status); + return; + } + assert(p); + + rc = sqlite3_create_collation_v2(db, zName, SQLITE_UTF16, (void *)pUCollator, + icuCollationColl, icuCollationDel + ); + if( rc!=SQLITE_OK ){ + ucol_close(pUCollator); + sqlite3_result_error(p, "Error registering collation function", -1); + } +} + +/* +** Register the ICU extension functions with database db. 
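+**
+** When this file is built as a loadable extension (see README.txt), an
+** application might load it along these lines (an informal sketch; the
+** library name follows the README example):
+**
+**   sqlite3_enable_load_extension(db, 1);
+**   sqlite3_load_extension(db, "./libSqliteIcu.so", 0, &zErrMsg);
+**
+** after which the functions registered below become available to SQL.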
+*/ +int sqlite3IcuInit(sqlite3 *db){ + struct IcuScalar { + const char *zName; /* Function name */ + int nArg; /* Number of arguments */ + int enc; /* Optimal text encoding */ + void *pContext; /* sqlite3_user_data() context */ + void (*xFunc)(sqlite3_context*,int,sqlite3_value**); + } scalars[] = { + {"regexp",-1, SQLITE_ANY, 0, icuRegexpFunc}, + + {"lower", 1, SQLITE_UTF16, 0, icuCaseFunc16}, + {"lower", 2, SQLITE_UTF16, 0, icuCaseFunc16}, + {"upper", 1, SQLITE_UTF16, (void*)1, icuCaseFunc16}, + {"upper", 2, SQLITE_UTF16, (void*)1, icuCaseFunc16}, + + {"lower", 1, SQLITE_UTF8, 0, icuCaseFunc16}, + {"lower", 2, SQLITE_UTF8, 0, icuCaseFunc16}, + {"upper", 1, SQLITE_UTF8, (void*)1, icuCaseFunc16}, + {"upper", 2, SQLITE_UTF8, (void*)1, icuCaseFunc16}, + + {"like", 2, SQLITE_UTF8, 0, icuLikeFunc}, + {"like", 3, SQLITE_UTF8, 0, icuLikeFunc}, + + {"icu_load_collation", 2, SQLITE_UTF8, (void*)db, icuLoadCollation}, + }; + + int rc = SQLITE_OK; + int i; + + for(i=0; rc==SQLITE_OK && i<(sizeof(scalars)/sizeof(struct IcuScalar)); i++){ + struct IcuScalar *p = &scalars[i]; + rc = sqlite3_create_function( + db, p->zName, p->nArg, p->enc, p->pContext, p->xFunc, 0, 0 + ); + } + + return rc; +} + +#if !SQLITE_CORE +int sqlite3_extension_init( + sqlite3 *db, + char **pzErrMsg, + const sqlite3_api_routines *pApi +){ + SQLITE_EXTENSION_INIT2(pApi) + return sqlite3IcuInit(db); +} +#endif + +#endif diff --git a/libraries/sqlite/unix/sqlite-3.5.1/install-sh b/libraries/sqlite/unix/sqlite-3.5.1/install-sh new file mode 100755 index 0000000000..e9de23842d --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/install-sh @@ -0,0 +1,251 @@ +#!/bin/sh +# +# install - install a program, script, or datafile +# This comes from X11R5 (mit/util/scripts/install.sh). +# +# Copyright 1991 by the Massachusetts Institute of Technology +# +# Permission to use, copy, modify, distribute, and sell this software and its +# documentation for any purpose is hereby granted without fee, provided that +# the above copyright notice appear in all copies and that both that +# copyright notice and this permission notice appear in supporting +# documentation, and that the name of M.I.T. not be used in advertising or +# publicity pertaining to distribution of the software without specific, +# written prior permission. M.I.T. makes no representations about the +# suitability of this software for any purpose. It is provided "as is" +# without express or implied warranty. +# +# Calling this script install-sh is preferred over install.sh, to prevent +# `make' implicit rules from creating a file called install from it +# when there is no Makefile. +# +# This script is compatible with the BSD install script, but was written +# from scratch. It can only install one file at a time, a restriction +# shared with many OS's install programs. + + +# set DOITPROG to echo to test this script + +# Don't use :- since 4.3BSD and earlier shells don't like it. +doit="${DOITPROG-}" + + +# put in absolute paths if you don't have them in your path; or use env. vars. 
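+
+# Illustrative invocations (a sketch only; the file names and destination
+# paths below are hypothetical, not taken from this build):
+#
+#   ./install-sh -c -m 644 sqlite3.h /usr/local/include/sqlite3.h
+#   ./install-sh -d /usr/local/lib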
+ +mvprog="${MVPROG-mv}" +cpprog="${CPPROG-cp}" +chmodprog="${CHMODPROG-chmod}" +chownprog="${CHOWNPROG-chown}" +chgrpprog="${CHGRPPROG-chgrp}" +stripprog="${STRIPPROG-strip}" +rmprog="${RMPROG-rm}" +mkdirprog="${MKDIRPROG-mkdir}" + +transformbasename="" +transform_arg="" +instcmd="$mvprog" +chmodcmd="$chmodprog 0755" +chowncmd="" +chgrpcmd="" +stripcmd="" +rmcmd="$rmprog -f" +mvcmd="$mvprog" +src="" +dst="" +dir_arg="" + +while [ x"$1" != x ]; do + case $1 in + -c) instcmd="$cpprog" + shift + continue;; + + -d) dir_arg=true + shift + continue;; + + -m) chmodcmd="$chmodprog $2" + shift + shift + continue;; + + -o) chowncmd="$chownprog $2" + shift + shift + continue;; + + -g) chgrpcmd="$chgrpprog $2" + shift + shift + continue;; + + -s) stripcmd="$stripprog" + shift + continue;; + + -t=*) transformarg=`echo $1 | sed 's/-t=//'` + shift + continue;; + + -b=*) transformbasename=`echo $1 | sed 's/-b=//'` + shift + continue;; + + *) if [ x"$src" = x ] + then + src=$1 + else + # this colon is to work around a 386BSD /bin/sh bug + : + dst=$1 + fi + shift + continue;; + esac +done + +if [ x"$src" = x ] +then + echo "install: no input file specified" + exit 1 +else + true +fi + +if [ x"$dir_arg" != x ]; then + dst=$src + src="" + + if [ -d $dst ]; then + instcmd=: + chmodcmd="" + else + instcmd=mkdir + fi +else + +# Waiting for this to be detected by the "$instcmd $src $dsttmp" command +# might cause directories to be created, which would be especially bad +# if $src (and thus $dsttmp) contains '*'. + + if [ -f $src -o -d $src ] + then + true + else + echo "install: $src does not exist" + exit 1 + fi + + if [ x"$dst" = x ] + then + echo "install: no destination specified" + exit 1 + else + true + fi + +# If destination is a directory, append the input filename; if your system +# does not like double slashes in filenames, you may need to add some logic + + if [ -d $dst ] + then + dst="$dst"/`basename $src` + else + true + fi +fi + +## this sed command emulates the dirname command +dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'` + +# Make sure that the destination directory exists. +# this part is taken from Noah Friedman's mkinstalldirs script + +# Skip lots of stat calls in the usual case. +if [ ! -d "$dstdir" ]; then +defaultIFS=' +' +IFS="${IFS-${defaultIFS}}" + +oIFS="${IFS}" +# Some sh's can't handle IFS=/ for some reason. +IFS='%' +set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'` +IFS="${oIFS}" + +pathcomp='' + +while [ $# -ne 0 ] ; do + pathcomp="${pathcomp}${1}" + shift + + if [ ! -d "${pathcomp}" ] ; + then + $mkdirprog "${pathcomp}" + else + true + fi + + pathcomp="${pathcomp}/" +done +fi + +if [ x"$dir_arg" != x ] +then + $doit $instcmd $dst && + + if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi && + if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi && + if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi && + if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi +else + +# If we're going to rename the final executable, determine the name now. + + if [ x"$transformarg" = x ] + then + dstfile=`basename $dst` + else + dstfile=`basename $dst $transformbasename | + sed $transformarg`$transformbasename + fi + +# don't allow the sed command to completely eliminate the filename + + if [ x"$dstfile" = x ] + then + dstfile=`basename $dst` + else + true + fi + +# Make a temp file name in the proper directory. 
+ + dsttmp=$dstdir/#inst.$$# + +# Move or copy the file name to the temp name + + $doit $instcmd $src $dsttmp && + + trap "rm -f ${dsttmp}" 0 && + +# and set any options; do chmod last to preserve setuid bits + +# If any of these fail, we abort the whole thing. If we want to +# ignore errors from any of these, just make sure not to ignore +# errors from the above "$doit $instcmd $src $dsttmp" command. + + if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi && + if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi && + if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi && + if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi && + +# Now rename the file to the real destination. + + $doit $rmcmd -f $dstdir/$dstfile && + $doit $mvcmd $dsttmp $dstdir/$dstfile + +fi && + + +exit 0 diff --git a/libraries/sqlite/unix/sqlite-3.5.1/ltmain.sh b/libraries/sqlite/unix/sqlite-3.5.1/ltmain.sh new file mode 100644 index 0000000000..8f7a6ac10d --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/ltmain.sh @@ -0,0 +1,6971 @@ +# ltmain.sh - Provide generalized library-building support services. +# NOTE: Changing this file will not affect anything until you rerun configure. +# +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005 +# Free Software Foundation, Inc. +# Originally by Gordon Matzigkeit , 1996 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +basename="s,^.*/,,g" + +# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh +# is ksh but when the shell is invoked as "sh" and the current value of +# the _XPG environment variable is not equal to 1 (one), the special +# positional parameter $0, within a function call, is the name of the +# function. +progpath="$0" + +# define SED for historic ltconfig's generated by Libtool 1.3 +test -z "$SED" && SED=sed + +# The name of this program: +progname=`echo "$progpath" | $SED $basename` +modename="$progname" + +# Global variables: +EXIT_SUCCESS=0 +EXIT_FAILURE=1 + +PROGRAM=ltmain.sh +PACKAGE=libtool +VERSION=1.5.22 +TIMESTAMP=" (1.1220.2.365 2005/12/18 22:14:06)" + +# See if we are running on zsh, and set the options which allow our +# commands through without removal of \ escapes. +if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi +# Same for EGREP, and just to be sure, do LTCC as well +if test "X$EGREP" = X ; then + EGREP=egrep +fi +if test "X$LTCC" = X ; then + LTCC=${CC-gcc} +fi + +# Check that we have a working $echo. +if test "X$1" = X--no-reexec; then + # Discard the --no-reexec flag, and continue. 
+ shift +elif test "X$1" = X--fallback-echo; then + # Avoid inline document here, it may be left over + : +elif test "X`($echo '\t') 2>/dev/null`" = 'X\t'; then + # Yippee, $echo works! + : +else + # Restart under the correct shell, and then maybe $echo will work. + exec $SHELL "$progpath" --no-reexec ${1+"$@"} +fi + +if test "X$1" = X--fallback-echo; then + # used as fallback echo + shift + cat <&2 + $echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2 + exit $EXIT_FAILURE +fi + +# Global variables. +mode=$default_mode +nonopt= +prev= +prevopt= +run= +show="$echo" +show_help= +execute_dlfiles= +duplicate_deps=no +preserve_args= +lo2o="s/\\.lo\$/.${objext}/" +o2lo="s/\\.${objext}\$/.lo/" + +if test -z "$max_cmd_len"; then + i=0 + testring="ABCD" + new_result= + + # If test is not a shell built-in, we'll probably end up computing a + # maximum length that is only half of the actual maximum length, but + # we can't tell. + while (test "X"`$SHELL $0 --fallback-echo "X$testring" 2>/dev/null` \ + = "XX$testring") >/dev/null 2>&1 && + new_result=`expr "X$testring" : ".*" 2>&1` && + max_cmd_len="$new_result" && + test "$i" != 17 # 1/2 MB should be enough + do + i=`expr $i + 1` + testring="$testring$testring" + done + testring= + # Add a significant safety factor because C++ compilers can tack on massive + # amounts of additional arguments before passing them to the linker. + # It appears as though 1/2 is a usable value. + max_cmd_len=`expr $max_cmd_len \/ 2` +fi + +##################################### +# Shell function definitions: +# This seems to be the best place for them + +# func_mktempdir [string] +# Make a temporary directory that won't clash with other running +# libtool processes, and avoids race conditions if possible. If +# given, STRING is the basename for that directory. +func_mktempdir () +{ + my_template="${TMPDIR-/tmp}/${1-$progname}" + + if test "$run" = ":"; then + # Return a directory name, but don't create it in dry-run mode + my_tmpdir="${my_template}-$$" + else + + # If mktemp works, use that first and foremost + my_tmpdir=`mktemp -d "${my_template}-XXXXXXXX" 2>/dev/null` + + if test ! -d "$my_tmpdir"; then + # Failing that, at least try and use $RANDOM to avoid a race + my_tmpdir="${my_template}-${RANDOM-0}$$" + + save_mktempdir_umask=`umask` + umask 0077 + $mkdir "$my_tmpdir" + umask $save_mktempdir_umask + fi + + # If we're not in dry-run mode, bomb out on failure + test -d "$my_tmpdir" || { + $echo "cannot create temporary directory \`$my_tmpdir'" 1>&2 + exit $EXIT_FAILURE + } + fi + + $echo "X$my_tmpdir" | $Xsed +} + + +# func_win32_libid arg +# return the library type of file 'arg' +# +# Need a lot of goo to handle *both* DLLs and import libs +# Has to be a shell function in order to 'eat' the argument +# that is supplied when $file_magic_command is called. +func_win32_libid () +{ + win32_libid_type="unknown" + win32_fileres=`file -L $1 2>/dev/null` + case $win32_fileres in + *ar\ archive\ import\ library*) # definitely import + win32_libid_type="x86 archive import" + ;; + *ar\ archive*) # could be an import, or static + if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | \ + $EGREP -e 'file format pe-i386(.*architecture: i386)?' 
>/dev/null ; then + win32_nmres=`eval $NM -f posix -A $1 | \ + $SED -n -e '1,100{/ I /{s,.*,import,;p;q;};}'` + case $win32_nmres in + import*) win32_libid_type="x86 archive import";; + *) win32_libid_type="x86 archive static";; + esac + fi + ;; + *DLL*) + win32_libid_type="x86 DLL" + ;; + *executable*) # but shell scripts are "executable" too... + case $win32_fileres in + *MS\ Windows\ PE\ Intel*) + win32_libid_type="x86 DLL" + ;; + esac + ;; + esac + $echo $win32_libid_type +} + + +# func_infer_tag arg +# Infer tagged configuration to use if any are available and +# if one wasn't chosen via the "--tag" command line option. +# Only attempt this if the compiler in the base compile +# command doesn't match the default compiler. +# arg is usually of the form 'gcc ...' +func_infer_tag () +{ + if test -n "$available_tags" && test -z "$tagname"; then + CC_quoted= + for arg in $CC; do + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + CC_quoted="$CC_quoted $arg" + done + case $@ in + # Blanks in the command may have been stripped by the calling shell, + # but not from the CC environment variable when configure was run. + " $CC "* | "$CC "* | " `$echo $CC` "* | "`$echo $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$echo $CC_quoted` "* | "`$echo $CC_quoted` "*) ;; + # Blanks at the start of $base_compile will cause this to fail + # if we don't check for them as well. + *) + for z in $available_tags; do + if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then + # Evaluate the configuration. + eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`" + CC_quoted= + for arg in $CC; do + # Double-quote args containing other shell metacharacters. + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + CC_quoted="$CC_quoted $arg" + done + # user sometimes does CC=-gcc so we need to match that to 'gcc' + trimedcc=`echo ${CC} | $SED -e "s/${host}-//g"` + # and sometimes libtool has CC=-gcc but user does CC=gcc + extendcc=${host}-${CC} + # and sometimes libtool has CC=-gcc but user has CC=-gcc + # (Gentoo-specific hack because we always export $CHOST) + mungedcc=${CHOST-${host}}-${trimedcc} + case "$@ " in + "cc "* | " cc "* | "${host}-cc "* | " ${host}-cc "*|\ + "gcc "* | " gcc "* | "${host}-gcc "* | " ${host}-gcc "*) + tagname=CC + break ;; + "$trimedcc "* | " $trimedcc "* | "`$echo $trimedcc` "* | " `$echo $trimedcc` "*|\ + "$extendcc "* | " $extendcc "* | "`$echo $extendcc` "* | " `$echo $extendcc` "*|\ + "$mungedcc "* | " $mungedcc "* | "`$echo $mungedcc` "* | " `$echo $mungedcc` "*|\ + " $CC "* | "$CC "* | " `$echo $CC` "* | "`$echo $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$echo $CC_quoted` "* | "`$echo $CC_quoted` "*) + # The compiler in the base compile command matches + # the one in the tagged configuration. + # Assume this is the tagged configuration we want. + tagname=$z + break + ;; + esac + fi + done + # If $tagname still isn't set, then no tagged configuration + # was found and let the user know that the "--tag" command + # line option must be used. 
+ if test -z "$tagname"; then + $echo "$modename: unable to infer tagged configuration" + $echo "$modename: specify a tag with \`--tag'" 1>&2 + exit $EXIT_FAILURE +# else +# $echo "$modename: using $tagname tagged configuration" + fi + ;; + esac + fi +} + + +# func_extract_an_archive dir oldlib +func_extract_an_archive () +{ + f_ex_an_ar_dir="$1"; shift + f_ex_an_ar_oldlib="$1" + + $show "(cd $f_ex_an_ar_dir && $AR x $f_ex_an_ar_oldlib)" + $run eval "(cd \$f_ex_an_ar_dir && $AR x \$f_ex_an_ar_oldlib)" || exit $? + if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then + : + else + $echo "$modename: ERROR: object name conflicts: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" 1>&2 + exit $EXIT_FAILURE + fi +} + +# func_extract_archives gentop oldlib ... +func_extract_archives () +{ + my_gentop="$1"; shift + my_oldlibs=${1+"$@"} + my_oldobjs="" + my_xlib="" + my_xabs="" + my_xdir="" + my_status="" + + $show "${rm}r $my_gentop" + $run ${rm}r "$my_gentop" + $show "$mkdir $my_gentop" + $run $mkdir "$my_gentop" + my_status=$? + if test "$my_status" -ne 0 && test ! -d "$my_gentop"; then + exit $my_status + fi + + for my_xlib in $my_oldlibs; do + # Extract the objects. + case $my_xlib in + [\\/]* | [A-Za-z]:[\\/]*) my_xabs="$my_xlib" ;; + *) my_xabs=`pwd`"/$my_xlib" ;; + esac + my_xlib=`$echo "X$my_xlib" | $Xsed -e 's%^.*/%%'` + my_xdir="$my_gentop/$my_xlib" + + $show "${rm}r $my_xdir" + $run ${rm}r "$my_xdir" + $show "$mkdir $my_xdir" + $run $mkdir "$my_xdir" + exit_status=$? + if test "$exit_status" -ne 0 && test ! -d "$my_xdir"; then + exit $exit_status + fi + case $host in + *-darwin*) + $show "Extracting $my_xabs" + # Do not bother doing anything if just a dry run + if test -z "$run"; then + darwin_orig_dir=`pwd` + cd $my_xdir || exit $? + darwin_archive=$my_xabs + darwin_curdir=`pwd` + darwin_base_archive=`$echo "X$darwin_archive" | $Xsed -e 's%^.*/%%'` + darwin_arches=`lipo -info "$darwin_archive" 2>/dev/null | $EGREP Architectures 2>/dev/null` + if test -n "$darwin_arches"; then + darwin_arches=`echo "$darwin_arches" | $SED -e 's/.*are://'` + darwin_arch= + $show "$darwin_base_archive has multiple architectures $darwin_arches" + for darwin_arch in $darwin_arches ; do + mkdir -p "unfat-$$/${darwin_base_archive}-${darwin_arch}" + lipo -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" "${darwin_archive}" + cd "unfat-$$/${darwin_base_archive}-${darwin_arch}" + func_extract_an_archive "`pwd`" "${darwin_base_archive}" + cd "$darwin_curdir" + $rm "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" + done # $darwin_arches + ## Okay now we have a bunch of thin objects, gotta fatten them up :) + darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print| xargs basename | sort -u | $NL2SP` + darwin_file= + darwin_files= + for darwin_file in $darwin_filelist; do + darwin_files=`find unfat-$$ -name $darwin_file -print | $NL2SP` + lipo -create -output "$darwin_file" $darwin_files + done # $darwin_filelist + ${rm}r unfat-$$ + cd "$darwin_orig_dir" + else + cd "$darwin_orig_dir" + func_extract_an_archive "$my_xdir" "$my_xabs" + fi # $darwin_arches + fi # $run + ;; + *) + func_extract_an_archive "$my_xdir" "$my_xabs" + ;; + esac + my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP` + done + func_extract_archives_result="$my_oldobjs" +} +# End of Shell function definitions +##################################### + +# Darwin sucks +eval std_shrext=\"$shrext_cmds\" + 
+disable_libs=no + +# Parse our command line options once, thoroughly. +while test "$#" -gt 0 +do + arg="$1" + shift + + case $arg in + -*=*) optarg=`$echo "X$arg" | $Xsed -e 's/[-_a-zA-Z0-9]*=//'` ;; + *) optarg= ;; + esac + + # If the previous option needs an argument, assign it. + if test -n "$prev"; then + case $prev in + execute_dlfiles) + execute_dlfiles="$execute_dlfiles $arg" + ;; + tag) + tagname="$arg" + preserve_args="${preserve_args}=$arg" + + # Check whether tagname contains only valid characters + case $tagname in + *[!-_A-Za-z0-9,/]*) + $echo "$progname: invalid tag name: $tagname" 1>&2 + exit $EXIT_FAILURE + ;; + esac + + case $tagname in + CC) + # Don't test for the "default" C tag, as we know, it's there, but + # not specially marked. + ;; + *) + if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$" < "$progpath" > /dev/null; then + taglist="$taglist $tagname" + # Evaluate the configuration. + eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$tagname'$/,/^# ### END LIBTOOL TAG CONFIG: '$tagname'$/p' < $progpath`" + else + $echo "$progname: ignoring unknown tag $tagname" 1>&2 + fi + ;; + esac + ;; + *) + eval "$prev=\$arg" + ;; + esac + + prev= + prevopt= + continue + fi + + # Have we seen a non-optional argument yet? + case $arg in + --help) + show_help=yes + ;; + + --version) + $echo "$PROGRAM (GNU $PACKAGE) $VERSION$TIMESTAMP" + $echo + $echo "Copyright (C) 2005 Free Software Foundation, Inc." + $echo "This is free software; see the source for copying conditions. There is NO" + $echo "warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + exit $? + ;; + + --config) + ${SED} -e '1,/^# ### BEGIN LIBTOOL CONFIG/d' -e '/^# ### END LIBTOOL CONFIG/,$d' $progpath + # Now print the configurations for the tags. + for tagname in $taglist; do + ${SED} -n -e "/^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$/,/^# ### END LIBTOOL TAG CONFIG: $tagname$/p" < "$progpath" + done + exit $? + ;; + + --debug) + $echo "$progname: enabling shell trace mode" + set -x + preserve_args="$preserve_args $arg" + ;; + + --dry-run | -n) + run=: + ;; + + --features) + $echo "host: $host" + if test "$build_libtool_libs" = yes; then + $echo "enable shared libraries" + else + $echo "disable shared libraries" + fi + if test "$build_old_libs" = yes; then + $echo "enable static libraries" + else + $echo "disable static libraries" + fi + exit $? + ;; + + --finish) mode="finish" ;; + + --mode) prevopt="--mode" prev=mode ;; + --mode=*) mode="$optarg" ;; + + --preserve-dup-deps) duplicate_deps="yes" ;; + + --quiet | --silent) + show=: + preserve_args="$preserve_args $arg" + ;; + + --tag) + prevopt="--tag" + prev=tag + preserve_args="$preserve_args --tag" + ;; + --tag=*) + set tag "$optarg" ${1+"$@"} + shift + prev=tag + preserve_args="$preserve_args --tag" + ;; + + -dlopen) + prevopt="-dlopen" + prev=execute_dlfiles + ;; + + -*) + $echo "$modename: unrecognized option \`$arg'" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + ;; + + *) + nonopt="$arg" + break + ;; + esac +done + +if test -n "$prevopt"; then + $echo "$modename: option \`$prevopt' requires an argument" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE +fi + +case $disable_libs in +no) + ;; +shared) + build_libtool_libs=no + build_old_libs=yes + ;; +static) + build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac` + ;; +esac + +# If this variable is set in any of the actions, the command in it +# will be execed at the end. This prevents here-documents from being +# left over by shells. 
+exec_cmd= + +if test -z "$show_help"; then + + # Infer the operation mode. + if test -z "$mode"; then + $echo "*** Warning: inferring the mode of operation is deprecated." 1>&2 + $echo "*** Future versions of Libtool will require --mode=MODE be specified." 1>&2 + case $nonopt in + *cc | cc* | *++ | gcc* | *-gcc* | g++* | xlc*) + mode=link + for arg + do + case $arg in + -c) + mode=compile + break + ;; + esac + done + ;; + *db | *dbx | *strace | *truss) + mode=execute + ;; + *install*|cp|mv) + mode=install + ;; + *rm) + mode=uninstall + ;; + *) + # If we have no mode, but dlfiles were specified, then do execute mode. + test -n "$execute_dlfiles" && mode=execute + + # Just use the default operation mode. + if test -z "$mode"; then + if test -n "$nonopt"; then + $echo "$modename: warning: cannot infer operation mode from \`$nonopt'" 1>&2 + else + $echo "$modename: warning: cannot infer operation mode without MODE-ARGS" 1>&2 + fi + fi + ;; + esac + fi + + # Only execute mode is allowed to have -dlopen flags. + if test -n "$execute_dlfiles" && test "$mode" != execute; then + $echo "$modename: unrecognized option \`-dlopen'" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # Change the help message to a mode-specific one. + generic_help="$help" + help="Try \`$modename --help --mode=$mode' for more information." + + # These modes are in order of execution frequency so that they run quickly. + case $mode in + # libtool compile mode + compile) + modename="$modename: compile" + # Get the compilation command and the source file. + base_compile= + srcfile="$nonopt" # always keep a non-empty value in "srcfile" + suppress_opt=yes + suppress_output= + arg_mode=normal + libobj= + later= + + for arg + do + case $arg_mode in + arg ) + # do not "continue". Instead, add this to base_compile + lastarg="$arg" + arg_mode=normal + ;; + + target ) + libobj="$arg" + arg_mode=normal + continue + ;; + + normal ) + # Accept any command-line options. + case $arg in + -o) + if test -n "$libobj" ; then + $echo "$modename: you cannot specify \`-o' more than once" 1>&2 + exit $EXIT_FAILURE + fi + arg_mode=target + continue + ;; + + -static | -prefer-pic | -prefer-non-pic) + later="$later $arg" + continue + ;; + + -no-suppress) + suppress_opt=no + continue + ;; + + -Xcompiler) + arg_mode=arg # the next one goes into the "base_compile" arg list + continue # The current "srcfile" will either be retained or + ;; # replaced later. I would guess that would be a bug. + + -Wc,*) + args=`$echo "X$arg" | $Xsed -e "s/^-Wc,//"` + lastarg= + save_ifs="$IFS"; IFS=',' + for arg in $args; do + IFS="$save_ifs" + + # Double-quote args containing other shell metacharacters. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + lastarg="$lastarg $arg" + done + IFS="$save_ifs" + lastarg=`$echo "X$lastarg" | $Xsed -e "s/^ //"` + + # Add the arguments to base_compile. + base_compile="$base_compile $lastarg" + continue + ;; + + * ) + # Accept the current argument as the source file. + # The previous "srcfile" becomes the current argument. + # + lastarg="$srcfile" + srcfile="$arg" + ;; + esac # case $arg + ;; + esac # case $arg_mode + + # Aesthetically quote the previous argument. + lastarg=`$echo "X$lastarg" | $Xsed -e "$sed_quote_subst"` + + case $lastarg in + # Double-quote args containing other shell metacharacters. 
+ # Many Bourne shells cannot handle close brackets correctly + # in scan sets, and some SunOS ksh mistreat backslash-escaping + # in scan sets (worked around with variable expansion), + # and furthermore cannot handle '|' '&' '(' ')' in scan sets + # at all, so we specify them separately. + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + lastarg="\"$lastarg\"" + ;; + esac + + base_compile="$base_compile $lastarg" + done # for arg + + case $arg_mode in + arg) + $echo "$modename: you must specify an argument for -Xcompile" + exit $EXIT_FAILURE + ;; + target) + $echo "$modename: you must specify a target with \`-o'" 1>&2 + exit $EXIT_FAILURE + ;; + *) + # Get the name of the library object. + [ -z "$libobj" ] && libobj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%'` + ;; + esac + + # Recognize several different file suffixes. + # If the user specifies -o file.o, it is replaced with file.lo + xform='[cCFSifmso]' + case $libobj in + *.ada) xform=ada ;; + *.adb) xform=adb ;; + *.ads) xform=ads ;; + *.asm) xform=asm ;; + *.c++) xform=c++ ;; + *.cc) xform=cc ;; + *.ii) xform=ii ;; + *.class) xform=class ;; + *.cpp) xform=cpp ;; + *.cxx) xform=cxx ;; + *.f90) xform=f90 ;; + *.for) xform=for ;; + *.java) xform=java ;; + esac + + libobj=`$echo "X$libobj" | $Xsed -e "s/\.$xform$/.lo/"` + + case $libobj in + *.lo) obj=`$echo "X$libobj" | $Xsed -e "$lo2o"` ;; + *) + $echo "$modename: cannot determine name of library object from \`$libobj'" 1>&2 + exit $EXIT_FAILURE + ;; + esac + + func_infer_tag $base_compile + + for arg in $later; do + case $arg in + -static) + build_old_libs=yes + continue + ;; + + -prefer-pic) + pic_mode=yes + continue + ;; + + -prefer-non-pic) + pic_mode=no + continue + ;; + esac + done + + qlibobj=`$echo "X$libobj" | $Xsed -e "$sed_quote_subst"` + case $qlibobj in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + qlibobj="\"$qlibobj\"" ;; + esac + test "X$libobj" != "X$qlibobj" \ + && $echo "X$libobj" | grep '[]~#^*{};<>?"'"'"' &()|`$[]' \ + && $echo "$modename: libobj name \`$libobj' may not contain shell special characters." + objname=`$echo "X$obj" | $Xsed -e 's%^.*/%%'` + xdir=`$echo "X$obj" | $Xsed -e 's%/[^/]*$%%'` + if test "X$xdir" = "X$obj"; then + xdir= + else + xdir=$xdir/ + fi + lobj=${xdir}$objdir/$objname + + if test -z "$base_compile"; then + $echo "$modename: you must specify a compilation command" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # Delete any leftover library objects. 
+ if test "$build_old_libs" = yes; then + removelist="$obj $lobj $libobj ${libobj}T" + else + removelist="$lobj $libobj ${libobj}T" + fi + + $run $rm $removelist + trap "$run $rm $removelist; exit $EXIT_FAILURE" 1 2 15 + + # On Cygwin there's no "real" PIC flag so we must build both object types + case $host_os in + cygwin* | mingw* | pw32* | os2*) + pic_mode=default + ;; + esac + if test "$pic_mode" = no && test "$deplibs_check_method" != pass_all; then + # non-PIC code in shared libraries is not supported + pic_mode=default + fi + + # Calculate the filename of the output object if compiler does + # not support -o with -c + if test "$compiler_c_o" = no; then + output_obj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%' -e 's%\.[^.]*$%%'`.${objext} + lockfile="$output_obj.lock" + removelist="$removelist $output_obj $lockfile" + trap "$run $rm $removelist; exit $EXIT_FAILURE" 1 2 15 + else + output_obj= + need_locks=no + lockfile= + fi + + # Lock this critical section if it is needed + # We use this script file to make the link, it avoids creating a new file + if test "$need_locks" = yes; then + until $run ln "$srcfile" "$lockfile" 2>/dev/null; do + $show "Waiting for $lockfile to be removed" + sleep 2 + done + elif test "$need_locks" = warn; then + if test -f "$lockfile"; then + $echo "\ +*** ERROR, $lockfile exists and contains: +`cat $lockfile 2>/dev/null` + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $run $rm $removelist + exit $EXIT_FAILURE + fi + $echo "$srcfile" > "$lockfile" + fi + + if test -n "$fix_srcfile_path"; then + eval srcfile=\"$fix_srcfile_path\" + fi + qsrcfile=`$echo "X$srcfile" | $Xsed -e "$sed_quote_subst"` + case $qsrcfile in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + qsrcfile="\"$qsrcfile\"" ;; + esac + + $run $rm "$libobj" "${libobj}T" + + # Create a libtool object file (analogous to a ".la" file), + # but don't create it if we're doing a dry run. + test -z "$run" && cat > ${libobj}T </dev/null`" != "X$srcfile"; then + $echo "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $run $rm $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed, then go on to compile the next one + if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then + $show "$mv $output_obj $lobj" + if $run $mv $output_obj $lobj; then : + else + error=$? + $run $rm $removelist + exit $error + fi + fi + + # Append the name of the PIC object to the libtool object file. + test -z "$run" && cat >> ${libobj}T <> ${libobj}T </dev/null`" != "X$srcfile"; then + $echo "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. 
If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $run $rm $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed + if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then + $show "$mv $output_obj $obj" + if $run $mv $output_obj $obj; then : + else + error=$? + $run $rm $removelist + exit $error + fi + fi + + # Append the name of the non-PIC object the libtool object file. + # Only append if the libtool object file exists. + test -z "$run" && cat >> ${libobj}T <> ${libobj}T <&2 + fi + if test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + else + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=built + fi + build_libtool_libs=no + build_old_libs=yes + break + ;; + esac + done + + # See if our shared archives depend on static archives. + test -n "$old_archive_from_new_cmds" && build_old_libs=yes + + # Go through the arguments, transforming them on the way. + while test "$#" -gt 0; do + arg="$1" + shift + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + qarg=\"`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`\" ### testsuite: skip nested quoting test + ;; + *) qarg=$arg ;; + esac + libtool_args="$libtool_args $qarg" + + # If the previous option needs an argument, assign it. + if test -n "$prev"; then + case $prev in + output) + compile_command="$compile_command @OUTPUT@" + finalize_command="$finalize_command @OUTPUT@" + ;; + esac + + case $prev in + dlfiles|dlprefiles) + if test "$preload" = no; then + # Add the symbol object into the linking commands. + compile_command="$compile_command @SYMFILE@" + finalize_command="$finalize_command @SYMFILE@" + preload=yes + fi + case $arg in + *.la | *.lo) ;; # We handle these cases below. + force) + if test "$dlself" = no; then + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + self) + if test "$prev" = dlprefiles; then + dlself=yes + elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then + dlself=yes + else + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + *) + if test "$prev" = dlfiles; then + dlfiles="$dlfiles $arg" + else + dlprefiles="$dlprefiles $arg" + fi + prev= + continue + ;; + esac + ;; + expsyms) + export_symbols="$arg" + if test ! -f "$arg"; then + $echo "$modename: symbol file \`$arg' does not exist" + exit $EXIT_FAILURE + fi + prev= + continue + ;; + expsyms_regex) + export_symbols_regex="$arg" + prev= + continue + ;; + inst_prefix) + inst_prefix_dir="$arg" + prev= + continue + ;; + precious_regex) + precious_files_regex="$arg" + prev= + continue + ;; + release) + release="-$arg" + prev= + continue + ;; + objectlist) + if test -f "$arg"; then + save_arg=$arg + moreargs= + for fil in `cat $save_arg` + do +# moreargs="$moreargs $fil" + arg=$fil + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if (${SED} -e '2q' $arg | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then + pic_object= + non_pic_object= + + # Read the .lo file + # If there is no directory component, then add one. + case $arg in + */* | *\\*) . $arg ;; + *) . 
./$arg ;; + esac + + if test -z "$pic_object" || \ + test -z "$non_pic_object" || + test "$pic_object" = none && \ + test "$non_pic_object" = none; then + $echo "$modename: cannot find name of object for \`$arg'" 1>&2 + exit $EXIT_FAILURE + fi + + # Extract subdirectory from the argument. + xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'` + if test "X$xdir" = "X$arg"; then + xdir= + else + xdir="$xdir/" + fi + + if test "$pic_object" != none; then + # Prepend the subdirectory the object is found in. + pic_object="$xdir$pic_object" + + if test "$prev" = dlfiles; then + if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then + dlfiles="$dlfiles $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test "$prev" = dlprefiles; then + # Preload the old-style object. + dlprefiles="$dlprefiles $pic_object" + prev= + fi + + # A PIC object. + libobjs="$libobjs $pic_object" + arg="$pic_object" + fi + + # Non-PIC object. + if test "$non_pic_object" != none; then + # Prepend the subdirectory the object is found in. + non_pic_object="$xdir$non_pic_object" + + # A standard non-PIC object + non_pic_objects="$non_pic_objects $non_pic_object" + if test -z "$pic_object" || test "$pic_object" = none ; then + arg="$non_pic_object" + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object="$pic_object" + non_pic_objects="$non_pic_objects $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if test -z "$run"; then + $echo "$modename: \`$arg' is not a valid libtool object" 1>&2 + exit $EXIT_FAILURE + else + # Dry-run case. + + # Extract subdirectory from the argument. + xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'` + if test "X$xdir" = "X$arg"; then + xdir= + else + xdir="$xdir/" + fi + + pic_object=`$echo "X${xdir}${objdir}/${arg}" | $Xsed -e "$lo2o"` + non_pic_object=`$echo "X${xdir}${arg}" | $Xsed -e "$lo2o"` + libobjs="$libobjs $pic_object" + non_pic_objects="$non_pic_objects $non_pic_object" + fi + fi + done + else + $echo "$modename: link input file \`$save_arg' does not exist" + exit $EXIT_FAILURE + fi + arg=$save_arg + prev= + continue + ;; + rpath | xrpath) + # We need an absolute path. 
+ case $arg in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + $echo "$modename: only absolute run-paths are allowed" 1>&2 + exit $EXIT_FAILURE + ;; + esac + if test "$prev" = rpath; then + case "$rpath " in + *" $arg "*) ;; + *) rpath="$rpath $arg" ;; + esac + else + case "$xrpath " in + *" $arg "*) ;; + *) xrpath="$xrpath $arg" ;; + esac + fi + prev= + continue + ;; + xcompiler) + compiler_flags="$compiler_flags $qarg" + prev= + compile_command="$compile_command $qarg" + finalize_command="$finalize_command $qarg" + continue + ;; + xlinker) + linker_flags="$linker_flags $qarg" + compiler_flags="$compiler_flags $wl$qarg" + prev= + compile_command="$compile_command $wl$qarg" + finalize_command="$finalize_command $wl$qarg" + continue + ;; + xcclinker) + linker_flags="$linker_flags $qarg" + compiler_flags="$compiler_flags $qarg" + prev= + compile_command="$compile_command $qarg" + finalize_command="$finalize_command $qarg" + continue + ;; + shrext) + shrext_cmds="$arg" + prev= + continue + ;; + darwin_framework|darwin_framework_skip) + test "$prev" = "darwin_framework" && compiler_flags="$compiler_flags $arg" + compile_command="$compile_command $arg" + finalize_command="$finalize_command $arg" + prev= + continue + ;; + *) + eval "$prev=\"\$arg\"" + prev= + continue + ;; + esac + fi # test -n "$prev" + + prevarg="$arg" + + case $arg in + -all-static) + if test -n "$link_static_flag"; then + compile_command="$compile_command $link_static_flag" + finalize_command="$finalize_command $link_static_flag" + fi + continue + ;; + + -allow-undefined) + # FIXME: remove this flag sometime in the future. + $echo "$modename: \`-allow-undefined' is deprecated because it is the default" 1>&2 + continue + ;; + + -avoid-version) + avoid_version=yes + continue + ;; + + -dlopen) + prev=dlfiles + continue + ;; + + -dlpreopen) + prev=dlprefiles + continue + ;; + + -export-dynamic) + export_dynamic=yes + continue + ;; + + -export-symbols | -export-symbols-regex) + if test -n "$export_symbols" || test -n "$export_symbols_regex"; then + $echo "$modename: more than one -exported-symbols argument is not allowed" + exit $EXIT_FAILURE + fi + if test "X$arg" = "X-export-symbols"; then + prev=expsyms + else + prev=expsyms_regex + fi + continue + ;; + + -framework|-arch|-isysroot) + case " $CC " in + *" ${arg} ${1} "* | *" ${arg} ${1} "*) + prev=darwin_framework_skip ;; + *) compiler_flags="$compiler_flags $arg" + prev=darwin_framework ;; + esac + compile_command="$compile_command $arg" + finalize_command="$finalize_command $arg" + continue + ;; + + -inst-prefix-dir) + prev=inst_prefix + continue + ;; + + # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* + # so, if we see these flags be careful not to treat them like -L + -L[A-Z][A-Z]*:*) + case $with_gcc/$host in + no/*-*-irix* | /*-*-irix*) + compile_command="$compile_command $arg" + finalize_command="$finalize_command $arg" + ;; + esac + continue + ;; + + -L*) + dir=`$echo "X$arg" | $Xsed -e 's/^-L//'` + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + absdir=`cd "$dir" && pwd` + if test -z "$absdir"; then + $echo "$modename: cannot determine absolute directory name of \`$dir'" 1>&2 + absdir="$dir" + notinst_path="$notinst_path $dir" + fi + dir="$absdir" + ;; + esac + case "$deplibs " in + *" -L$dir "*) ;; + *) + deplibs="$deplibs -L$dir" + lib_search_path="$lib_search_path $dir" + ;; + esac + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*) + testbindir=`$echo "X$dir" | $Xsed -e 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$dir:"*) ;; + *) dllsearchpath="$dllsearchpath:$dir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + *) dllsearchpath="$dllsearchpath:$testbindir";; + esac + ;; + esac + continue + ;; + + -l*) + if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos*) + # These systems don't actually have a C or math library (as such) + continue + ;; + *-*-os2*) + # These systems don't actually have a C library (as such) + test "X$arg" = "X-lc" && continue + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc due to us having libc/libc_r. + test "X$arg" = "X-lc" && continue + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C and math libraries are in the System framework + deplibs="$deplibs -framework System" + continue + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + test "X$arg" = "X-lc" && continue + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + test "X$arg" = "X-lc" && continue + ;; + esac + elif test "X$arg" = "X-lc_r"; then + case $host in + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc_r directly, use -pthread flag. + continue + ;; + esac + fi + deplibs="$deplibs $arg" + continue + ;; + + # Tru64 UNIX uses -model [arg] to determine the layout of C++ + # classes, name mangling, and exception handling. + -model) + compile_command="$compile_command $arg" + compiler_flags="$compiler_flags $arg" + finalize_command="$finalize_command $arg" + prev=xcompiler + continue + ;; + + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe) + compiler_flags="$compiler_flags $arg" + compile_command="$compile_command $arg" + finalize_command="$finalize_command $arg" + continue + ;; + + -module) + module=yes + continue + ;; + + # -64, -mips[0-9] enable 64-bit mode on the SGI compiler + # -r[0-9][0-9]* specifies the processor on the SGI compiler + # -xarch=*, -xtarget=* enable 64-bit mode on the Sun compiler + # +DA*, +DD* enable 64-bit mode on the HP compiler + # -q* pass through compiler args for the IBM compiler + # -m* pass through architecture-specific compiler args for GCC + # -m*, -t[45]*, -txscale* pass through architecture-specific + # compiler args for GCC + # -pg pass through profiling flag for GCC + # @file GCC response files + -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*|-pg| \ + -t[45]*|-txscale*|@*) + + # Unknown arguments in both finalize_command and compile_command need + # to be aesthetically quoted because they are evaled later. 
+ arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + compile_command="$compile_command $arg" + finalize_command="$finalize_command $arg" + compiler_flags="$compiler_flags $arg" + continue + ;; + + -shrext) + prev=shrext + continue + ;; + + -no-fast-install) + fast_install=no + continue + ;; + + -no-install) + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*) + # The PATH hackery in wrapper scripts is required on Windows + # in order for the loader to find any dlls it needs. + $echo "$modename: warning: \`-no-install' is ignored for $host" 1>&2 + $echo "$modename: warning: assuming \`-no-fast-install' instead" 1>&2 + fast_install=no + ;; + *) no_install=yes ;; + esac + continue + ;; + + -no-undefined) + allow_undefined=no + continue + ;; + + -objectlist) + prev=objectlist + continue + ;; + + -o) prev=output ;; + + -precious-files-regex) + prev=precious_regex + continue + ;; + + -release) + prev=release + continue + ;; + + -rpath) + prev=rpath + continue + ;; + + -R) + prev=xrpath + continue + ;; + + -R*) + dir=`$echo "X$arg" | $Xsed -e 's/^-R//'` + # We need an absolute path. + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + $echo "$modename: only absolute run-paths are allowed" 1>&2 + exit $EXIT_FAILURE + ;; + esac + case "$xrpath " in + *" $dir "*) ;; + *) xrpath="$xrpath $dir" ;; + esac + continue + ;; + + -static) + # The effects of -static are defined in a previous loop. + # We used to do the same as -all-static on platforms that + # didn't have a PIC flag, but the assumption that the effects + # would be equivalent was wrong. It would break on at least + # Digital Unix and AIX. + continue + ;; + + -thread-safe) + thread_safe=yes + continue + ;; + + -version-info) + prev=vinfo + continue + ;; + -version-number) + prev=vinfo + vinfo_number=yes + continue + ;; + + -Wc,*) + args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wc,//'` + arg= + save_ifs="$IFS"; IFS=',' + for flag in $args; do + IFS="$save_ifs" + case $flag in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + flag="\"$flag\"" + ;; + esac + arg="$arg $wl$flag" + compiler_flags="$compiler_flags $flag" + done + IFS="$save_ifs" + arg=`$echo "X$arg" | $Xsed -e "s/^ //"` + ;; + + -Wl,*) + args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wl,//'` + arg= + save_ifs="$IFS"; IFS=',' + for flag in $args; do + IFS="$save_ifs" + case $flag in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + flag="\"$flag\"" + ;; + esac + arg="$arg $wl$flag" + compiler_flags="$compiler_flags $wl$flag" + linker_flags="$linker_flags $flag" + done + IFS="$save_ifs" + arg=`$echo "X$arg" | $Xsed -e "s/^ //"` + ;; + + -Xcompiler) + prev=xcompiler + continue + ;; + + -Xlinker) + prev=xlinker + continue + ;; + + -XCClinker) + prev=xcclinker + continue + ;; + + # Some other compiler flag. + -* | +*) + # Unknown arguments in both finalize_command and compile_command need + # to be aesthetically quoted because they are evaled later. + arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + ;; + + *.$objext) + # A standard object. + objs="$objs $arg" + ;; + + *.lo) + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if (${SED} -e '2q' $arg | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then + pic_object= + non_pic_object= + + # Read the .lo file + # If there is no directory component, then add one. 
+ case $arg in + */* | *\\*) . $arg ;; + *) . ./$arg ;; + esac + + if test -z "$pic_object" || \ + test -z "$non_pic_object" || + test "$pic_object" = none && \ + test "$non_pic_object" = none; then + $echo "$modename: cannot find name of object for \`$arg'" 1>&2 + exit $EXIT_FAILURE + fi + + # Extract subdirectory from the argument. + xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'` + if test "X$xdir" = "X$arg"; then + xdir= + else + xdir="$xdir/" + fi + + if test "$pic_object" != none; then + # Prepend the subdirectory the object is found in. + pic_object="$xdir$pic_object" + + if test "$prev" = dlfiles; then + if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then + dlfiles="$dlfiles $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test "$prev" = dlprefiles; then + # Preload the old-style object. + dlprefiles="$dlprefiles $pic_object" + prev= + fi + + # A PIC object. + libobjs="$libobjs $pic_object" + arg="$pic_object" + fi + + # Non-PIC object. + if test "$non_pic_object" != none; then + # Prepend the subdirectory the object is found in. + non_pic_object="$xdir$non_pic_object" + + # A standard non-PIC object + non_pic_objects="$non_pic_objects $non_pic_object" + if test -z "$pic_object" || test "$pic_object" = none ; then + arg="$non_pic_object" + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object="$pic_object" + non_pic_objects="$non_pic_objects $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if test -z "$run"; then + $echo "$modename: \`$arg' is not a valid libtool object" 1>&2 + exit $EXIT_FAILURE + else + # Dry-run case. + + # Extract subdirectory from the argument. + xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'` + if test "X$xdir" = "X$arg"; then + xdir= + else + xdir="$xdir/" + fi + + pic_object=`$echo "X${xdir}${objdir}/${arg}" | $Xsed -e "$lo2o"` + non_pic_object=`$echo "X${xdir}${arg}" | $Xsed -e "$lo2o"` + libobjs="$libobjs $pic_object" + non_pic_objects="$non_pic_objects $non_pic_object" + fi + fi + ;; + + *.$libext) + # An archive. + deplibs="$deplibs $arg" + old_deplibs="$old_deplibs $arg" + continue + ;; + + *.la) + # A libtool-controlled library. + + if test "$prev" = dlfiles; then + # This library was specified with -dlopen. + dlfiles="$dlfiles $arg" + prev= + elif test "$prev" = dlprefiles; then + # The library was specified with -dlpreopen. + dlprefiles="$dlprefiles $arg" + prev= + else + deplibs="$deplibs $arg" + fi + continue + ;; + + # Some other compiler argument. + *) + # Unknown arguments in both finalize_command and compile_command need + # to be aesthetically quoted because they are evaled later. + arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + ;; + esac # arg + + # Now actually substitute the argument into the commands. 
+ if test -n "$arg"; then + compile_command="$compile_command $arg" + finalize_command="$finalize_command $arg" + fi + done # argument parsing loop + + if test -n "$prev"; then + $echo "$modename: the \`$prevarg' option requires an argument" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then + eval arg=\"$export_dynamic_flag_spec\" + compile_command="$compile_command $arg" + finalize_command="$finalize_command $arg" + fi + + oldlibs= + # calculate the name of the file, without its directory + outputname=`$echo "X$output" | $Xsed -e 's%^.*/%%'` + libobjs_save="$libobjs" + + if test -n "$shlibpath_var"; then + # get the directories listed in $shlibpath_var + eval shlib_search_path=\`\$echo \"X\${$shlibpath_var}\" \| \$Xsed -e \'s/:/ /g\'\` + else + shlib_search_path= + fi + eval sys_lib_search_path=\"$sys_lib_search_path_spec\" + eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" + + output_objdir=`$echo "X$output" | $Xsed -e 's%/[^/]*$%%'` + if test "X$output_objdir" = "X$output"; then + output_objdir="$objdir" + else + output_objdir="$output_objdir/$objdir" + fi + # Create the object directory. + if test ! -d "$output_objdir"; then + $show "$mkdir $output_objdir" + $run $mkdir $output_objdir + exit_status=$? + if test "$exit_status" -ne 0 && test ! -d "$output_objdir"; then + exit $exit_status + fi + fi + + # Determine the type of output + case $output in + "") + $echo "$modename: you must specify an output file" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + ;; + *.$libext) linkmode=oldlib ;; + *.lo | *.$objext) linkmode=obj ;; + *.la) linkmode=lib ;; + *) linkmode=prog ;; # Anything else should be a program. + esac + + case $host in + *cygwin* | *mingw* | *pw32*) + # don't eliminate duplications in $postdeps and $predeps + duplicate_compiler_generated_deps=yes + ;; + *) + duplicate_compiler_generated_deps=$duplicate_deps + ;; + esac + specialdeplibs= + + libs= + # Find all interdependent deplibs by searching for libraries + # that are linked more than once (e.g. -la -lb -la) + for deplib in $deplibs; do + if test "X$duplicate_deps" = "Xyes" ; then + case "$libs " in + *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; + esac + fi + libs="$libs $deplib" + done + + if test "$linkmode" = lib; then + libs="$predeps $libs $compiler_lib_search_path $postdeps" + + # Compute libraries that are listed more than once in $predeps + # $postdeps and mark them as special (i.e., whose duplicates are + # not to be eliminated). 
+ pre_post_deps= + if test "X$duplicate_compiler_generated_deps" = "Xyes" ; then + for pre_post_dep in $predeps $postdeps; do + case "$pre_post_deps " in + *" $pre_post_dep "*) specialdeplibs="$specialdeplibs $pre_post_deps" ;; + esac + pre_post_deps="$pre_post_deps $pre_post_dep" + done + fi + pre_post_deps= + fi + + deplibs= + newdependency_libs= + newlib_search_path= + need_relink=no # whether we're linking any uninstalled libtool libraries + notinst_deplibs= # not-installed libtool libraries + case $linkmode in + lib) + passes="conv link" + for file in $dlfiles $dlprefiles; do + case $file in + *.la) ;; + *) + $echo "$modename: libraries can \`-dlopen' only libtool libraries: $file" 1>&2 + exit $EXIT_FAILURE + ;; + esac + done + ;; + prog) + compile_deplibs= + finalize_deplibs= + alldeplibs=no + newdlfiles= + newdlprefiles= + passes="conv scan dlopen dlpreopen link" + ;; + *) passes="conv" + ;; + esac + for pass in $passes; do + if test "$linkmode,$pass" = "lib,link" || + test "$linkmode,$pass" = "prog,scan"; then + libs="$deplibs" + deplibs= + fi + if test "$linkmode" = prog; then + case $pass in + dlopen) libs="$dlfiles" ;; + dlpreopen) libs="$dlprefiles" ;; + link) libs="$deplibs %DEPLIBS% $dependency_libs" ;; + esac + fi + if test "$pass" = dlopen; then + # Collect dlpreopened libraries + save_deplibs="$deplibs" + deplibs= + fi + for deplib in $libs; do + lib= + found=no + case $deplib in + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe) + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + compiler_flags="$compiler_flags $deplib" + fi + continue + ;; + -l*) + if test "$linkmode" != lib && test "$linkmode" != prog; then + $echo "$modename: warning: \`-l' is ignored for archives/objects" 1>&2 + continue + fi + name=`$echo "X$deplib" | $Xsed -e 's/^-l//'` + for searchdir in $newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path; do + for search_ext in .la $std_shrext .so .a; do + # Search the libtool library + lib="$searchdir/lib${name}${search_ext}" + if test -f "$lib"; then + if test "$search_ext" = ".la"; then + found=yes + else + found=no + fi + break 2 + fi + done + done + if test "$found" != yes; then + # deplib doesn't seem to be a libtool library + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" + fi + continue + else # deplib is a libtool library + # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, + # We need to do some special things here, and not later. + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + case " $predeps $postdeps " in + *" $deplib "*) + if (${SED} -e '2q' $lib | + grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then + library_names= + old_library= + case $lib in + */* | *\\*) . $lib ;; + *) . ./$lib ;; + esac + for l in $old_library $library_names; do + ll="$l" + done + if test "X$ll" = "X$old_library" ; then # only static version available + found=no + ladir=`$echo "X$lib" | $Xsed -e 's%/[^/]*$%%'` + test "X$ladir" = "X$lib" && ladir="." 
+ lib=$ladir/$old_library + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" + fi + continue + fi + fi + ;; + *) ;; + esac + fi + fi + ;; # -l + -L*) + case $linkmode in + lib) + deplibs="$deplib $deplibs" + test "$pass" = conv && continue + newdependency_libs="$deplib $newdependency_libs" + newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'` + ;; + prog) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + continue + fi + if test "$pass" = scan; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'` + ;; + *) + $echo "$modename: warning: \`-L' is ignored for archives/objects" 1>&2 + ;; + esac # linkmode + continue + ;; # -L + -R*) + if test "$pass" = link; then + dir=`$echo "X$deplib" | $Xsed -e 's/^-R//'` + # Make sure the xrpath contains only unique directories. + case "$xrpath " in + *" $dir "*) ;; + *) xrpath="$xrpath $dir" ;; + esac + fi + deplibs="$deplib $deplibs" + continue + ;; + *.la) lib="$deplib" ;; + *.$libext) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + continue + fi + case $linkmode in + lib) + valid_a_lib=no + case $deplibs_check_method in + match_pattern*) + set dummy $deplibs_check_method + match_pattern_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"` + if eval $echo \"$deplib\" 2>/dev/null \ + | $SED 10q \ + | $EGREP "$match_pattern_regex" > /dev/null; then + valid_a_lib=yes + fi + ;; + pass_all) + valid_a_lib=yes + ;; + esac + if test "$valid_a_lib" != yes; then + $echo + $echo "*** Warning: Trying to link with static lib archive $deplib." + $echo "*** I have the capability to make that library automatically link in when" + $echo "*** you link to this library. But I can only do this if you have a" + $echo "*** shared version of the library, which you do not appear to have" + $echo "*** because the file extensions .$libext of this argument makes me believe" + $echo "*** that it is just a static archive that I should not used here." + else + $echo + $echo "*** Warning: Linking the shared library $output against the" + $echo "*** static library $deplib is not portable!" + deplibs="$deplib $deplibs" + fi + continue + ;; + prog) + if test "$pass" != link; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + continue + ;; + esac # linkmode + ;; # *.$libext + *.lo | *.$objext) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + elif test "$linkmode" = prog; then + if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then + # If there is no dlopen support or we're linking statically, + # we need to preload. + newdlprefiles="$newdlprefiles $deplib" + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + newdlfiles="$newdlfiles $deplib" + fi + fi + continue + ;; + %DEPLIBS%) + alldeplibs=yes + continue + ;; + esac # case $deplib + if test "$found" = yes || test -f "$lib"; then : + else + $echo "$modename: cannot find the library \`$lib' or unhandled argument \`$deplib'" 1>&2 + exit $EXIT_FAILURE + fi + + # Check to see that this really is a libtool archive. 
+ if (${SED} -e '2q' $lib | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then : + else + $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 + exit $EXIT_FAILURE + fi + + ladir=`$echo "X$lib" | $Xsed -e 's%/[^/]*$%%'` + test "X$ladir" = "X$lib" && ladir="." + + dlname= + dlopen= + dlpreopen= + libdir= + library_names= + old_library= + # If the library was installed with an old release of libtool, + # it will not redefine variables installed, or shouldnotlink + installed=yes + shouldnotlink=no + avoidtemprpath= + + + # Read the .la file + case $lib in + */* | *\\*) . $lib ;; + *) . ./$lib ;; + esac + + if test "$linkmode,$pass" = "lib,link" || + test "$linkmode,$pass" = "prog,scan" || + { test "$linkmode" != prog && test "$linkmode" != lib; }; then + test -n "$dlopen" && dlfiles="$dlfiles $dlopen" + test -n "$dlpreopen" && dlprefiles="$dlprefiles $dlpreopen" + fi + + if test "$pass" = conv; then + # Only check for convenience libraries + deplibs="$lib $deplibs" + if test -z "$libdir"; then + if test -z "$old_library"; then + $echo "$modename: cannot find name of link library for \`$lib'" 1>&2 + exit $EXIT_FAILURE + fi + # It is a libtool convenience library, so add in its objects. + convenience="$convenience $ladir/$objdir/$old_library" + old_convenience="$old_convenience $ladir/$objdir/$old_library" + tmp_libs= + for deplib in $dependency_libs; do + deplibs="$deplib $deplibs" + if test "X$duplicate_deps" = "Xyes" ; then + case "$tmp_libs " in + *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; + esac + fi + tmp_libs="$tmp_libs $deplib" + done + elif test "$linkmode" != prog && test "$linkmode" != lib; then + $echo "$modename: \`$lib' is not a convenience library" 1>&2 + exit $EXIT_FAILURE + fi + continue + fi # $pass = conv + + + # Get the name of the library we link against. + linklib= + for l in $old_library $library_names; do + linklib="$l" + done + if test -z "$linklib"; then + $echo "$modename: cannot find name of link library for \`$lib'" 1>&2 + exit $EXIT_FAILURE + fi + + # This library was specified with -dlopen. + if test "$pass" = dlopen; then + if test -z "$libdir"; then + $echo "$modename: cannot -dlopen a convenience library: \`$lib'" 1>&2 + exit $EXIT_FAILURE + fi + if test -z "$dlname" || + test "$dlopen_support" != yes || + test "$build_libtool_libs" = no; then + # If there is no dlname, no dlopen support or we're linking + # statically, we need to preload. We also need to preload any + # dependent libraries so libltdl's deplib preloader doesn't + # bomb out in the load deplibs phase. + dlprefiles="$dlprefiles $lib $dependency_libs" + else + newdlfiles="$newdlfiles $lib" + fi + continue + fi # $pass = dlopen + + # We need an absolute path. + case $ladir in + [\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;; + *) + abs_ladir=`cd "$ladir" && pwd` + if test -z "$abs_ladir"; then + $echo "$modename: warning: cannot determine absolute directory name of \`$ladir'" 1>&2 + $echo "$modename: passing it literally to the linker, although it might fail" 1>&2 + abs_ladir="$ladir" + fi + ;; + esac + laname=`$echo "X$lib" | $Xsed -e 's%^.*/%%'` + + # Find the relevant object directory and library name. + if test "X$installed" = Xyes; then + if test ! -f "$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then + $echo "$modename: warning: library \`$lib' was moved." 1>&2 + dir="$ladir" + absdir="$abs_ladir" + libdir="$abs_ladir" + else + dir="$libdir" + absdir="$libdir" + fi + test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes + else + if test ! 
-f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then + dir="$ladir" + absdir="$abs_ladir" + # Remove this search path later + notinst_path="$notinst_path $abs_ladir" + else + dir="$ladir/$objdir" + absdir="$abs_ladir/$objdir" + # Remove this search path later + notinst_path="$notinst_path $abs_ladir" + fi + fi # $installed = yes + name=`$echo "X$laname" | $Xsed -e 's/\.la$//' -e 's/^lib//'` + + # This library was specified with -dlpreopen. + if test "$pass" = dlpreopen; then + if test -z "$libdir"; then + $echo "$modename: cannot -dlpreopen a convenience library: \`$lib'" 1>&2 + exit $EXIT_FAILURE + fi + # Prefer using a static library (so that no silly _DYNAMIC symbols + # are required to link). + if test -n "$old_library"; then + newdlprefiles="$newdlprefiles $dir/$old_library" + # Otherwise, use the dlname, so that lt_dlopen finds it. + elif test -n "$dlname"; then + newdlprefiles="$newdlprefiles $dir/$dlname" + else + newdlprefiles="$newdlprefiles $dir/$linklib" + fi + fi # $pass = dlpreopen + + if test -z "$libdir"; then + # Link the convenience library + if test "$linkmode" = lib; then + deplibs="$dir/$old_library $deplibs" + elif test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$dir/$old_library $compile_deplibs" + finalize_deplibs="$dir/$old_library $finalize_deplibs" + else + deplibs="$lib $deplibs" # used for prog,scan pass + fi + continue + fi + + + if test "$linkmode" = prog && test "$pass" != link; then + newlib_search_path="$newlib_search_path $ladir" + deplibs="$lib $deplibs" + + linkalldeplibs=no + if test "$link_all_deplibs" != no || test -z "$library_names" || + test "$build_libtool_libs" = no; then + linkalldeplibs=yes + fi + + tmp_libs= + for deplib in $dependency_libs; do + case $deplib in + -L*) newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`;; ### testsuite: skip nested quoting test + esac + # Need to link against all dependency_libs? + if test "$linkalldeplibs" = yes; then + deplibs="$deplib $deplibs" + else + # Need to hardcode shared library paths + # or/and link against static libraries + newdependency_libs="$deplib $newdependency_libs" + fi + if test "X$duplicate_deps" = "Xyes" ; then + case "$tmp_libs " in + *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; + esac + fi + tmp_libs="$tmp_libs $deplib" + done # for deplib + continue + fi # $linkmode = prog... + + if test "$linkmode,$pass" = "prog,link"; then + if test -n "$library_names" && + { test "$prefer_static_libs" = no || test -z "$old_library"; }; then + # We need to hardcode the library path + if test -n "$shlibpath_var" && test -z "$avoidtemprpath" ; then + # Make sure the rpath contains only unique directories. + case "$temp_rpath " in + *" $dir "*) ;; + *" $absdir "*) ;; + *) temp_rpath="$temp_rpath $absdir" ;; + esac + fi + + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. + case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) compile_rpath="$compile_rpath $absdir" + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) finalize_rpath="$finalize_rpath $libdir" + esac + ;; + esac + fi # $linkmode,$pass = prog,link... 
+ + if test "$alldeplibs" = yes && + { test "$deplibs_check_method" = pass_all || + { test "$build_libtool_libs" = yes && + test -n "$library_names"; }; }; then + # We only need to search for static libraries + continue + fi + fi + + link_static=no # Whether the deplib will be linked statically + use_static_libs=$prefer_static_libs + if test "$use_static_libs" = built && test "$installed" = yes ; then + use_static_libs=no + fi + if test -n "$library_names" && + { test "$use_static_libs" = no || test -z "$old_library"; }; then + if test "$installed" = no; then + notinst_deplibs="$notinst_deplibs $lib" + need_relink=yes + fi + # This is a shared library + + # Warn about portability, can't link against -module's on + # some systems (darwin) + if test "$shouldnotlink" = yes && test "$pass" = link ; then + $echo + if test "$linkmode" = prog; then + $echo "*** Warning: Linking the executable $output against the loadable module" + else + $echo "*** Warning: Linking the shared library $output against the loadable module" + fi + $echo "*** $linklib is not portable!" + fi + if test "$linkmode" = lib && + test "$hardcode_into_libs" = yes; then + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. + case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) compile_rpath="$compile_rpath $absdir" + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) finalize_rpath="$finalize_rpath $libdir" + esac + ;; + esac + fi + + if test -n "$old_archive_from_expsyms_cmds"; then + # figure out the soname + set dummy $library_names + realname="$2" + shift; shift + libname=`eval \\$echo \"$libname_spec\"` + # use dlname if we got it. it's perfectly good, no? + if test -n "$dlname"; then + soname="$dlname" + elif test -n "$soname_spec"; then + # bleh windows + case $host in + *cygwin* | mingw*) + major=`expr $current - $age` + versuffix="-$major" + ;; + esac + eval soname=\"$soname_spec\" + else + soname="$realname" + fi + + # Make a new name for the extract_expsyms_cmds to use + soroot="$soname" + soname=`$echo $soroot | ${SED} -e 's/^.*\///'` + newlib="libimp-`$echo $soname | ${SED} 's/^lib//;s/\.dll$//'`.a" + + # If the library has no export list, then create one now + if test -f "$output_objdir/$soname-def"; then : + else + $show "extracting exported symbol list from \`$soname'" + save_ifs="$IFS"; IFS='~' + cmds=$extract_expsyms_cmds + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || exit $? + done + IFS="$save_ifs" + fi + + # Create $newlib + if test -f "$output_objdir/$newlib"; then :; else + $show "generating import library for \`$soname'" + save_ifs="$IFS"; IFS='~' + cmds=$old_archive_from_expsyms_cmds + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || exit $? 
+ done + IFS="$save_ifs" + fi + # make sure the library variables are pointing to the new library + dir=$output_objdir + linklib=$newlib + fi # test -n "$old_archive_from_expsyms_cmds" + + if test "$linkmode" = prog || test "$mode" != relink; then + add_shlibpath= + add_dir= + add= + lib_linked=yes + case $hardcode_action in + immediate | unsupported) + if test "$hardcode_direct" = no; then + add="$dir/$linklib" + case $host in + *-*-sco3.2v5.0.[024]*) add_dir="-L$dir" ;; + *-*-sysv4*uw2*) add_dir="-L$dir" ;; + *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \ + *-*-unixware7*) add_dir="-L$dir" ;; + *-*-darwin* ) + # if the lib is a module then we can not link against + # it, someone is ignoring the new warnings I added + if /usr/bin/file -L $add 2> /dev/null | + $EGREP ": [^:]* bundle" >/dev/null ; then + $echo "** Warning, lib $linklib is a module, not a shared library" + if test -z "$old_library" ; then + $echo + $echo "** And there doesn't seem to be a static archive available" + $echo "** The link will probably fail, sorry" + else + add="$dir/$old_library" + fi + fi + esac + elif test "$hardcode_minus_L" = no; then + case $host in + *-*-sunos*) add_shlibpath="$dir" ;; + esac + add_dir="-L$dir" + add="-l$name" + elif test "$hardcode_shlibpath_var" = no; then + add_shlibpath="$dir" + add="-l$name" + else + lib_linked=no + fi + ;; + relink) + if test "$hardcode_direct" = yes; then + add="$dir/$linklib" + elif test "$hardcode_minus_L" = yes; then + add_dir="-L$dir" + # Try looking first in the location we're being installed to. + if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + add_dir="$add_dir -L$inst_prefix_dir$libdir" + ;; + esac + fi + add="-l$name" + elif test "$hardcode_shlibpath_var" = yes; then + add_shlibpath="$dir" + add="-l$name" + else + lib_linked=no + fi + ;; + *) lib_linked=no ;; + esac + + if test "$lib_linked" != yes; then + $echo "$modename: configuration error: unsupported hardcode properties" + exit $EXIT_FAILURE + fi + + if test -n "$add_shlibpath"; then + case :$compile_shlibpath: in + *":$add_shlibpath:"*) ;; + *) compile_shlibpath="$compile_shlibpath$add_shlibpath:" ;; + esac + fi + if test "$linkmode" = prog; then + test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" + test -n "$add" && compile_deplibs="$add $compile_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + if test "$hardcode_direct" != yes && \ + test "$hardcode_minus_L" != yes && \ + test "$hardcode_shlibpath_var" = yes; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; + esac + fi + fi + fi + + if test "$linkmode" = prog || test "$mode" = relink; then + add_shlibpath= + add_dir= + add= + # Finalize command for both is simple: just hardcode it. + if test "$hardcode_direct" = yes; then + add="$libdir/$linklib" + elif test "$hardcode_minus_L" = yes; then + add_dir="-L$libdir" + add="-l$name" + elif test "$hardcode_shlibpath_var" = yes; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; + esac + add="-l$name" + elif test "$hardcode_automatic" = yes; then + if test -n "$inst_prefix_dir" && + test -f "$inst_prefix_dir$libdir/$linklib" ; then + add="$inst_prefix_dir$libdir/$linklib" + else + add="$libdir/$linklib" + fi + else + # We cannot seem to hardcode it, guess we'll fake it. + add_dir="-L$libdir" + # Try looking first in the location we're being installed to. 
+ if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + add_dir="$add_dir -L$inst_prefix_dir$libdir" + ;; + esac + fi + add="-l$name" + fi + + if test "$linkmode" = prog; then + test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" + test -n "$add" && finalize_deplibs="$add $finalize_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + fi + fi + elif test "$linkmode" = prog; then + # Here we assume that one of hardcode_direct or hardcode_minus_L + # is not unsupported. This is valid on all known static and + # shared platforms. + if test "$hardcode_direct" != unsupported; then + test -n "$old_library" && linklib="$old_library" + compile_deplibs="$dir/$linklib $compile_deplibs" + finalize_deplibs="$dir/$linklib $finalize_deplibs" + else + compile_deplibs="-l$name -L$dir $compile_deplibs" + finalize_deplibs="-l$name -L$dir $finalize_deplibs" + fi + elif test "$build_libtool_libs" = yes; then + # Not a shared library + if test "$deplibs_check_method" != pass_all; then + # We're trying link a shared library against a static one + # but the system doesn't support it. + + # Just print a warning and add the library to dependency_libs so + # that the program can be linked against the static library. + $echo + $echo "*** Warning: This system can not link to static lib archive $lib." + $echo "*** I have the capability to make that library automatically link in when" + $echo "*** you link to this library. But I can only do this if you have a" + $echo "*** shared version of the library, which you do not appear to have." + if test "$module" = yes; then + $echo "*** But as you try to build a module library, libtool will still create " + $echo "*** a static module, that should work as long as the dlopening application" + $echo "*** is linked with the -dlopen flag to resolve symbols at runtime." + if test -z "$global_symbol_pipe"; then + $echo + $echo "*** However, this would only work if libtool was able to extract symbol" + $echo "*** lists from a program, using \`nm' or equivalent, but libtool could" + $echo "*** not find such a program. So, this module is probably useless." + $echo "*** \`nm' from GNU binutils and a full rebuild may help." + fi + if test "$build_old_libs" = no; then + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + else + deplibs="$dir/$old_library $deplibs" + link_static=yes + fi + fi # link shared/static library? + + if test "$linkmode" = lib; then + if test -n "$dependency_libs" && + { test "$hardcode_into_libs" != yes || + test "$build_old_libs" = yes || + test "$link_static" = yes; }; then + # Extract -R from dependency_libs + temp_deplibs= + for libdir in $dependency_libs; do + case $libdir in + -R*) temp_xrpath=`$echo "X$libdir" | $Xsed -e 's/^-R//'` + case " $xrpath " in + *" $temp_xrpath "*) ;; + *) xrpath="$xrpath $temp_xrpath";; + esac;; + *) temp_deplibs="$temp_deplibs $libdir";; + esac + done + dependency_libs="$temp_deplibs" + fi + + newlib_search_path="$newlib_search_path $absdir" + # Link against this library + test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs" + # ... 
and its dependency_libs + tmp_libs= + for deplib in $dependency_libs; do + newdependency_libs="$deplib $newdependency_libs" + if test "X$duplicate_deps" = "Xyes" ; then + case "$tmp_libs " in + *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; + esac + fi + tmp_libs="$tmp_libs $deplib" + done + + if test "$link_all_deplibs" != no; then + # Add the search paths of all dependency libraries + for deplib in $dependency_libs; do + case $deplib in + -L*) path="$deplib" ;; + *.la) + dir=`$echo "X$deplib" | $Xsed -e 's%/[^/]*$%%'` + test "X$dir" = "X$deplib" && dir="." + # We need an absolute path. + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;; + *) + absdir=`cd "$dir" && pwd` + if test -z "$absdir"; then + $echo "$modename: warning: cannot determine absolute directory name of \`$dir'" 1>&2 + absdir="$dir" + fi + ;; + esac + if grep "^installed=no" $deplib > /dev/null; then + path="$absdir/$objdir" + else + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` + if test -z "$libdir"; then + $echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2 + exit $EXIT_FAILURE + fi + if test "$absdir" != "$libdir"; then + $echo "$modename: warning: \`$deplib' seems to be moved" 1>&2 + fi + path="$absdir" + fi + depdepl= + case $host in + *-*-darwin*) + # we do not want to link against static libs, + # but need to link against shared + eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` + if test -n "$deplibrary_names" ; then + for tmp in $deplibrary_names ; do + depdepl=$tmp + done + if test -f "$path/$depdepl" ; then + depdepl="$path/$depdepl" + fi + # do not add paths which are already there + case " $newlib_search_path " in + *" $path "*) ;; + *) newlib_search_path="$newlib_search_path $path";; + esac + fi + path="" + ;; + *) + path="-L$path" + ;; + esac + ;; + -l*) + case $host in + *-*-darwin*) + # Again, we only want to link against shared libraries + eval tmp_libs=`$echo "X$deplib" | $Xsed -e "s,^\-l,,"` + for tmp in $newlib_search_path ; do + if test -f "$tmp/lib$tmp_libs.dylib" ; then + eval depdepl="$tmp/lib$tmp_libs.dylib" + break + fi + done + path="" + ;; + *) continue ;; + esac + ;; + *) continue ;; + esac + case " $deplibs " in + *" $path "*) ;; + *) deplibs="$path $deplibs" ;; + esac + case " $deplibs " in + *" $depdepl "*) ;; + *) deplibs="$depdepl $deplibs" ;; + esac + done + fi # link_all_deplibs != no + fi # linkmode = lib + done # for deplib in $libs + dependency_libs="$newdependency_libs" + if test "$pass" = dlpreopen; then + # Link the dlpreopened libraries before other libraries + for deplib in $save_deplibs; do + deplibs="$deplib $deplibs" + done + fi + if test "$pass" != dlopen; then + if test "$pass" != conv; then + # Make sure lib_search_path contains only unique directories. 
+ lib_search_path= + for dir in $newlib_search_path; do + case "$lib_search_path " in + *" $dir "*) ;; + *) lib_search_path="$lib_search_path $dir" ;; + esac + done + newlib_search_path= + fi + + if test "$linkmode,$pass" != "prog,link"; then + vars="deplibs" + else + vars="compile_deplibs finalize_deplibs" + fi + for var in $vars dependency_libs; do + # Add libraries to $var in reverse order + eval tmp_libs=\"\$$var\" + new_libs= + for deplib in $tmp_libs; do + # FIXME: Pedantically, this is the right thing to do, so + # that some nasty dependency loop isn't accidentally + # broken: + #new_libs="$deplib $new_libs" + # Pragmatically, this seems to cause very few problems in + # practice: + case $deplib in + -L*) new_libs="$deplib $new_libs" ;; + -R*) ;; + *) + # And here is the reason: when a library appears more + # than once as an explicit dependence of a library, or + # is implicitly linked in more than once by the + # compiler, it is considered special, and multiple + # occurrences thereof are not removed. Compare this + # with having the same library being listed as a + # dependency of multiple other libraries: in this case, + # we know (pedantically, we assume) the library does not + # need to be listed more than once, so we keep only the + # last copy. This is not always right, but it is rare + # enough that we require users that really mean to play + # such unportable linking tricks to link the library + # using -Wl,-lname, so that libtool does not consider it + # for duplicate removal. + case " $specialdeplibs " in + *" $deplib "*) new_libs="$deplib $new_libs" ;; + *) + case " $new_libs " in + *" $deplib "*) ;; + *) new_libs="$deplib $new_libs" ;; + esac + ;; + esac + ;; + esac + done + tmp_libs= + for deplib in $new_libs; do + case $deplib in + -L*) + case " $tmp_libs " in + *" $deplib "*) ;; + *) tmp_libs="$tmp_libs $deplib" ;; + esac + ;; + *) tmp_libs="$tmp_libs $deplib" ;; + esac + done + eval $var=\"$tmp_libs\" + done # for var + fi + # Last step: remove runtime libs from dependency_libs + # (they stay in deplibs) + tmp_libs= + for i in $dependency_libs ; do + case " $predeps $postdeps $compiler_lib_search_path " in + *" $i "*) + i="" + ;; + esac + if test -n "$i" ; then + tmp_libs="$tmp_libs $i" + fi + done + dependency_libs=$tmp_libs + done # for pass + if test "$linkmode" = prog; then + dlfiles="$newdlfiles" + dlprefiles="$newdlprefiles" + fi + + case $linkmode in + oldlib) + if test -n "$deplibs"; then + $echo "$modename: warning: \`-l' and \`-L' are ignored for archives" 1>&2 + fi + + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + $echo "$modename: warning: \`-dlopen' is ignored for archives" 1>&2 + fi + + if test -n "$rpath"; then + $echo "$modename: warning: \`-rpath' is ignored for archives" 1>&2 + fi + + if test -n "$xrpath"; then + $echo "$modename: warning: \`-R' is ignored for archives" 1>&2 + fi + + if test -n "$vinfo"; then + $echo "$modename: warning: \`-version-info/-version-number' is ignored for archives" 1>&2 + fi + + if test -n "$release"; then + $echo "$modename: warning: \`-release' is ignored for archives" 1>&2 + fi + + if test -n "$export_symbols" || test -n "$export_symbols_regex"; then + $echo "$modename: warning: \`-export-symbols' is ignored for archives" 1>&2 + fi + + # Now set the variables for building old libraries. + build_libtool_libs=no + oldlibs="$output" + objs="$objs$old_deplibs" + ;; + + lib) + # Make sure we only generate libraries of the form `libNAME.la'. 
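The reverse-order rebuild above implements the policy spelled out in its comment: an ordinary library listed several times keeps only one copy (the one that is last in link order, since the input list is itself accumulated back to front), while anything recorded in $specialdeplibs keeps every occurrence. A stand-alone demo of the same idiom with invented library names:

    # Keep one copy of ordinary libs, every copy of "special" ones
    specialdeplibs=" -lspecial "
    tmp_libs="-la -lspecial -lb -lspecial -la"
    new_libs=
    for deplib in $tmp_libs; do
      case " $specialdeplibs " in
      *" $deplib "*) new_libs="$deplib $new_libs" ;;   # special: keep every copy
      *)
        case " $new_libs " in
        *" $deplib "*) ;;                              # ordinary duplicate: drop
        *) new_libs="$deplib $new_libs" ;;
        esac
        ;;
      esac
    done
    echo "$new_libs"    # prints: -lspecial -lb -lspecial -la   (list is reversed; one -la dropped)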
+ case $outputname in + lib*) + name=`$echo "X$outputname" | $Xsed -e 's/\.la$//' -e 's/^lib//'` + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + ;; + *) + if test "$module" = no; then + $echo "$modename: libtool library \`$output' must begin with \`lib'" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + if test "$need_lib_prefix" != no; then + # Add the "lib" prefix for modules if required + name=`$echo "X$outputname" | $Xsed -e 's/\.la$//'` + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + else + libname=`$echo "X$outputname" | $Xsed -e 's/\.la$//'` + fi + ;; + esac + + if test -n "$objs"; then + if test "$deplibs_check_method" != pass_all; then + $echo "$modename: cannot build libtool library \`$output' from non-libtool objects on this host:$objs" 2>&1 + exit $EXIT_FAILURE + else + $echo + $echo "*** Warning: Linking the shared library $output against the non-libtool" + $echo "*** objects $objs is not portable!" + libobjs="$libobjs $objs" + fi + fi + + if test "$dlself" != no; then + $echo "$modename: warning: \`-dlopen self' is ignored for libtool libraries" 1>&2 + fi + + set dummy $rpath + if test "$#" -gt 2; then + $echo "$modename: warning: ignoring multiple \`-rpath's for a libtool library" 1>&2 + fi + install_libdir="$2" + + oldlibs= + if test -z "$rpath"; then + if test "$build_libtool_libs" = yes; then + # Building a libtool convenience library. + # Some compilers have problems with a `.al' extension so + # convenience libraries should have the same extension an + # archive normally would. + oldlibs="$output_objdir/$libname.$libext $oldlibs" + build_libtool_libs=convenience + build_old_libs=yes + fi + + if test -n "$vinfo"; then + $echo "$modename: warning: \`-version-info/-version-number' is ignored for convenience libraries" 1>&2 + fi + + if test -n "$release"; then + $echo "$modename: warning: \`-release' is ignored for convenience libraries" 1>&2 + fi + else + + # Parse the version information argument. + save_ifs="$IFS"; IFS=':' + set dummy $vinfo 0 0 0 + IFS="$save_ifs" + + if test -n "$8"; then + $echo "$modename: too many parameters to \`-version-info'" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # convert absolute version numbers to libtool ages + # this retains compatibility with .la files and attempts + # to make the code below a bit more comprehensible + + case $vinfo_number in + yes) + number_major="$2" + number_minor="$3" + number_revision="$4" + # + # There are really only two kinds -- those that + # use the current revision as the major version + # and those that subtract age and use age as + # a minor version. But, then there is irix + # which has an extra 1 added just for fun + # + case $version_type in + darwin|linux|osf|windows) + current=`expr $number_major + $number_minor` + age="$number_minor" + revision="$number_revision" + ;; + freebsd-aout|freebsd-elf|sunos) + current="$number_major" + revision="$number_minor" + age="0" + ;; + irix|nonstopux) + current=`expr $number_major + $number_minor - 1` + age="$number_minor" + revision="$number_minor" + ;; + esac + ;; + no) + current="$2" + revision="$3" + age="$4" + ;; + esac + + # Check that each of the things are valid numbers. 
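When the library was given `-version-number MAJOR:MINOR:REVISION', the table above rewrites it into libtool's CURRENT:REVISION:AGE model before any filenames are computed; on linux, darwin, osf and windows the major number is folded into CURRENT. Worked through for a hypothetical `-version-number 1:2:3' on a linux host:

    # -version-number 1:2:3, version_type=linux (values are illustrative)
    number_major=1; number_minor=2; number_revision=3
    current=`expr $number_major + $number_minor`       # 3
    age=$number_minor                                  # 2
    revision=$number_revision                          # 3
    # The linux branch later yields libNAME.so.(current-age).age.revision:
    echo "libfoo.so.`expr $current - $age`.$age.$revision"   # libfoo.so.1.2.3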
+ case $current in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + $echo "$modename: CURRENT \`$current' must be a nonnegative integer" 1>&2 + $echo "$modename: \`$vinfo' is not valid version information" 1>&2 + exit $EXIT_FAILURE + ;; + esac + + case $revision in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + $echo "$modename: REVISION \`$revision' must be a nonnegative integer" 1>&2 + $echo "$modename: \`$vinfo' is not valid version information" 1>&2 + exit $EXIT_FAILURE + ;; + esac + + case $age in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + $echo "$modename: AGE \`$age' must be a nonnegative integer" 1>&2 + $echo "$modename: \`$vinfo' is not valid version information" 1>&2 + exit $EXIT_FAILURE + ;; + esac + + if test "$age" -gt "$current"; then + $echo "$modename: AGE \`$age' is greater than the current interface number \`$current'" 1>&2 + $echo "$modename: \`$vinfo' is not valid version information" 1>&2 + exit $EXIT_FAILURE + fi + + # Calculate the version variables. + major= + versuffix= + verstring= + case $version_type in + none) ;; + + darwin) + # Like Linux, but with the current version available in + # verstring for coding it into the library header + major=.`expr $current - $age` + versuffix="$major.$age.$revision" + # Darwin ld doesn't like 0 for these options... + minor_current=`expr $current + 1` + verstring="${wl}-compatibility_version ${wl}$minor_current ${wl}-current_version ${wl}$minor_current.$revision" + ;; + + freebsd-aout) + major=".$current" + versuffix=".$current.$revision"; + ;; + + freebsd-elf) + major=".$current" + versuffix=".$current"; + ;; + + irix | nonstopux) + major=`expr $current - $age + 1` + + case $version_type in + nonstopux) verstring_prefix=nonstopux ;; + *) verstring_prefix=sgi ;; + esac + verstring="$verstring_prefix$major.$revision" + + # Add in all the interfaces that we are compatible with. + loop=$revision + while test "$loop" -ne 0; do + iface=`expr $revision - $loop` + loop=`expr $loop - 1` + verstring="$verstring_prefix$major.$iface:$verstring" + done + + # Before this point, $major must not contain `.'. + major=.$major + versuffix="$major.$revision" + ;; + + linux) + major=.`expr $current - $age` + versuffix="$major.$age.$revision" + ;; + + osf) + major=.`expr $current - $age` + versuffix=".$current.$age.$revision" + verstring="$current.$age.$revision" + + # Add in all the interfaces that we are compatible with. + loop=$age + while test "$loop" -ne 0; do + iface=`expr $current - $loop` + loop=`expr $loop - 1` + verstring="$verstring:${iface}.0" + done + + # Make executables depend on our current version. + verstring="$verstring:${current}.0" + ;; + + sunos) + major=".$current" + versuffix=".$current.$revision" + ;; + + windows) + # Use '-' rather than '.', since we only want one + # extension on DOS 8.3 filesystems. + major=`expr $current - $age` + versuffix="-$major" + ;; + + *) + $echo "$modename: unknown library version type \`$version_type'" 1>&2 + $echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2 + exit $EXIT_FAILURE + ;; + esac + + # Clear the version info if we defaulted, and they specified a release. 
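For a plain `-version-info CURRENT:REVISION:AGE', the linux branch above produces a file whose first component is the interface span CURRENT - AGE, not CURRENT itself. A worked example with made-up numbers:

    # -version-info 7:3:2 on version_type=linux (numbers are examples)
    current=7; revision=3; age=2
    major=.`expr $current - $age`        # .5
    versuffix="$major.$age.$revision"    # .5.2.3
    echo "libfoo.so$versuffix"           # libfoo.so.5.2.3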
+ if test -z "$vinfo" && test -n "$release"; then + major= + case $version_type in + darwin) + # we can't check for "0.0" in archive_cmds due to quoting + # problems, so we reset it completely + verstring= + ;; + *) + verstring="0.0" + ;; + esac + if test "$need_version" = no; then + versuffix= + else + versuffix=".0.0" + fi + fi + + # Remove version info from name if versioning should be avoided + if test "$avoid_version" = yes && test "$need_version" = no; then + major= + versuffix= + verstring="" + fi + + # Check to see if the archive will have undefined symbols. + if test "$allow_undefined" = yes; then + if test "$allow_undefined_flag" = unsupported; then + $echo "$modename: warning: undefined symbols not allowed in $host shared libraries" 1>&2 + build_libtool_libs=no + build_old_libs=yes + fi + else + # Don't allow undefined symbols. + allow_undefined_flag="$no_undefined_flag" + fi + fi + + if test "$mode" != relink; then + # Remove our outputs, but don't remove object files since they + # may have been created when compiling PIC objects. + removelist= + tempremovelist=`$echo "$output_objdir/*"` + for p in $tempremovelist; do + case $p in + *.$objext) + ;; + $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*) + if test "X$precious_files_regex" != "X"; then + if echo $p | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 + then + continue + fi + fi + removelist="$removelist $p" + ;; + *) ;; + esac + done + if test -n "$removelist"; then + $show "${rm}r $removelist" + $run ${rm}r $removelist + fi + fi + + # Now set the variables for building old libraries. + if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then + oldlibs="$oldlibs $output_objdir/$libname.$libext" + + # Transform .lo files to .o files. + oldobjs="$objs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e "$lo2o" | $NL2SP` + fi + + # Eliminate all temporary directories. + for path in $notinst_path; do + lib_search_path=`$echo "$lib_search_path " | ${SED} -e "s% $path % %g"` + deplibs=`$echo "$deplibs " | ${SED} -e "s% -L$path % %g"` + dependency_libs=`$echo "$dependency_libs " | ${SED} -e "s% -L$path % %g"` + done + + if test -n "$xrpath"; then + # If the user specified any rpath flags, then add them. + temp_xrpath= + for libdir in $xrpath; do + temp_xrpath="$temp_xrpath -R$libdir" + case "$finalize_rpath " in + *" $libdir "*) ;; + *) finalize_rpath="$finalize_rpath $libdir" ;; + esac + done + if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then + dependency_libs="$temp_xrpath $dependency_libs" + fi + fi + + # Make sure dlfiles contains only unique files that won't be dlpreopened + old_dlfiles="$dlfiles" + dlfiles= + for lib in $old_dlfiles; do + case " $dlprefiles $dlfiles " in + *" $lib "*) ;; + *) dlfiles="$dlfiles $lib" ;; + esac + done + + # Make sure dlprefiles contains only unique files + old_dlprefiles="$dlprefiles" + dlprefiles= + for lib in $old_dlprefiles; do + case "$dlprefiles " in + *" $lib "*) ;; + *) dlprefiles="$dlprefiles $lib" ;; + esac + done + + if test "$build_libtool_libs" = yes; then + if test -n "$rpath"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos*) + # these systems don't actually have a c library (as such)! + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C library is in the System framework + deplibs="$deplibs -framework System" + ;; + *-*-netbsd*) + # Don't link with libc until the a.out ld.so is fixed. 
+ ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc due to us having libc/libc_r. + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + ;; + *) + # Add libc to deplibs on all other systems if necessary. + if test "$build_libtool_need_lc" = "yes"; then + deplibs="$deplibs -lc" + fi + ;; + esac + fi + + # Transform deplibs into only deplibs that can be linked in shared. + name_save=$name + libname_save=$libname + release_save=$release + versuffix_save=$versuffix + major_save=$major + # I'm not sure if I'm treating the release correctly. I think + # release should show up in the -l (ie -lgmp5) so we don't want to + # add it in twice. Is that correct? + release="" + versuffix="" + major="" + newdeplibs= + droppeddeps=no + case $deplibs_check_method in + pass_all) + # Don't check for shared/static. Everything works. + # This might be a little naive. We might want to check + # whether the library exists or not. But this is on + # osf3 & osf4 and I'm not really sure... Just + # implementing what was already the behavior. + newdeplibs=$deplibs + ;; + test_compile) + # This code stresses the "libraries are programs" paradigm to its + # limits. Maybe even breaks it. We compile a program, linking it + # against the deplibs as a proxy for the library. Then we can check + # whether they linked in statically or dynamically with ldd. + $rm conftest.c + cat > conftest.c </dev/null` + for potent_lib in $potential_libs; do + # Follow soft links. + if ls -lLd "$potent_lib" 2>/dev/null \ + | grep " -> " >/dev/null; then + continue + fi + # The statement above tries to avoid entering an + # endless loop below, in case of cyclic links. + # We might still enter an endless loop, since a link + # loop can be closed while we follow links, + # but so what? + potlib="$potent_lib" + while test -h "$potlib" 2>/dev/null; do + potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'` + case $potliblink in + [\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";; + *) potlib=`$echo "X$potlib" | $Xsed -e 's,[^/]*$,,'`"$potliblink";; + esac + done + # It is ok to link against an archive when + # building a shared library. + if $AR -t $potlib > /dev/null 2>&1; then + newdeplibs="$newdeplibs $a_deplib" + a_deplib="" + break 2 + fi + if eval $file_magic_cmd \"\$potlib\" 2>/dev/null \ + | ${SED} 10q \ + | $EGREP "$file_magic_regex" > /dev/null; then + newdeplibs="$newdeplibs $a_deplib" + a_deplib="" + break 2 + fi + done + done + fi + if test -n "$a_deplib" ; then + droppeddeps=yes + $echo + $echo "*** Warning: linker path does not have real file for library $a_deplib." + $echo "*** I have the capability to make that library automatically link in when" + $echo "*** you link to this library. But I can only do this if you have a" + $echo "*** shared version of the library, which you do not appear to have" + $echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib" ; then + $echo "*** with $libname but no candidates were found. (...for file magic test)" + else + $echo "*** with $libname and none of the candidates passed a file format test" + $echo "*** using a file magic. Last file checked: $potlib" + fi + fi + else + # Add a -L argument. + newdeplibs="$newdeplibs $a_deplib" + fi + done # Gone through all deplibs. 
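On hosts configured with a file_magic deplibs check, each surviving candidate is screened with the configure-time $file_magic_cmd (typically a `file' invocation) piped through the platform's $file_magic_regex; only a match is accepted as a usable shared library. A hedged approximation of a single probe, with an illustrative regex and path:

    # Roughly what one file-magic probe looks like (regex and path are examples)
    potlib=/usr/lib/libfoo.so.1
    file_magic_regex='ELF [0-9][0-9]*-bit [LM]SB shared object'
    if file -L "$potlib" 2>/dev/null | sed 10q | egrep "$file_magic_regex" >/dev/null; then
      echo "acceptable shared library: $potlib"
    fi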
+ ;; + match_pattern*) + set dummy $deplibs_check_method + match_pattern_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"` + for a_deplib in $deplibs; do + name=`expr $a_deplib : '-l\(.*\)'` + # If $name is empty we are operating on a -L argument. + if test -n "$name" && test "$name" != "0"; then + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + case " $predeps $postdeps " in + *" $a_deplib "*) + newdeplibs="$newdeplibs $a_deplib" + a_deplib="" + ;; + esac + fi + if test -n "$a_deplib" ; then + libname=`eval \\$echo \"$libname_spec\"` + for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do + potential_libs=`ls $i/$libname[.-]* 2>/dev/null` + for potent_lib in $potential_libs; do + potlib="$potent_lib" # see symlink-check above in file_magic test + if eval $echo \"$potent_lib\" 2>/dev/null \ + | ${SED} 10q \ + | $EGREP "$match_pattern_regex" > /dev/null; then + newdeplibs="$newdeplibs $a_deplib" + a_deplib="" + break 2 + fi + done + done + fi + if test -n "$a_deplib" ; then + droppeddeps=yes + $echo + $echo "*** Warning: linker path does not have real file for library $a_deplib." + $echo "*** I have the capability to make that library automatically link in when" + $echo "*** you link to this library. But I can only do this if you have a" + $echo "*** shared version of the library, which you do not appear to have" + $echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib" ; then + $echo "*** with $libname but no candidates were found. (...for regex pattern test)" + else + $echo "*** with $libname and none of the candidates passed a file format test" + $echo "*** using a regex pattern. Last file checked: $potlib" + fi + fi + else + # Add a -L argument. + newdeplibs="$newdeplibs $a_deplib" + fi + done # Gone through all deplibs. + ;; + none | unknown | *) + newdeplibs="" + tmp_deplibs=`$echo "X $deplibs" | $Xsed -e 's/ -lc$//' \ + -e 's/ -[LR][^ ]*//g'` + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + for i in $predeps $postdeps ; do + # can't use Xsed below, because $i might contain '/' + tmp_deplibs=`$echo "X $tmp_deplibs" | ${SED} -e "1s,^X,," -e "s,$i,,"` + done + fi + if $echo "X $tmp_deplibs" | $Xsed -e 's/[ ]//g' \ + | grep . >/dev/null; then + $echo + if test "X$deplibs_check_method" = "Xnone"; then + $echo "*** Warning: inter-library dependencies are not supported in this platform." + else + $echo "*** Warning: inter-library dependencies are not known to be supported." + fi + $echo "*** All declared inter-library dependencies are being dropped." + droppeddeps=yes + fi + ;; + esac + versuffix=$versuffix_save + major=$major_save + release=$release_save + libname=$libname_save + name=$name_save + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library is the System framework + newdeplibs=`$echo "X $newdeplibs" | $Xsed -e 's/ -lc / -framework System /'` + ;; + esac + + if test "$droppeddeps" = yes; then + if test "$module" = yes; then + $echo + $echo "*** Warning: libtool could not satisfy all declared inter-library" + $echo "*** dependencies of module $libname. Therefore, libtool will create" + $echo "*** a static module, that should work as long as the dlopening" + $echo "*** application is linked with the -dlopen flag." 
+ if test -z "$global_symbol_pipe"; then + $echo + $echo "*** However, this would only work if libtool was able to extract symbol" + $echo "*** lists from a program, using \`nm' or equivalent, but libtool could" + $echo "*** not find such a program. So, this module is probably useless." + $echo "*** \`nm' from GNU binutils and a full rebuild may help." + fi + if test "$build_old_libs" = no; then + oldlibs="$output_objdir/$libname.$libext" + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + else + $echo "*** The inter-library dependencies that have been dropped here will be" + $echo "*** automatically added whenever a program is linked with this library" + $echo "*** or is declared to -dlopen it." + + if test "$allow_undefined" = no; then + $echo + $echo "*** Since this library must not contain undefined symbols," + $echo "*** because either the platform does not support them or" + $echo "*** it was explicitly requested with -no-undefined," + $echo "*** libtool will only create a static version of it." + if test "$build_old_libs" = no; then + oldlibs="$output_objdir/$libname.$libext" + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + fi + fi + # Done checking deplibs! + deplibs=$newdeplibs + fi + + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $deplibs " in + *" -L$path/$objdir "*) + new_libs="$new_libs -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) new_libs="$new_libs $deplib" ;; + esac + ;; + *) new_libs="$new_libs $deplib" ;; + esac + done + deplibs="$new_libs" + + + # All the library-specific variables (install_libdir is set above). + library_names= + old_library= + dlname= + + # Test again, we may have decided not to build it any more + if test "$build_libtool_libs" = yes; then + if test "$hardcode_into_libs" = yes; then + # Hardcode the library paths + hardcode_libdirs= + dep_rpath= + rpath="$finalize_rpath" + test "$mode" != relink && rpath="$compile_rpath$rpath" + for libdir in $rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + dep_rpath="$dep_rpath $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) perm_rpath="$perm_rpath $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + if test -n "$hardcode_libdir_flag_spec_ld"; then + eval dep_rpath=\"$hardcode_libdir_flag_spec_ld\" + else + eval dep_rpath=\"$hardcode_libdir_flag_spec\" + fi + fi + if test -n "$runpath_var" && test -n "$perm_rpath"; then + # We should set the runpath_var. 
+ rpath= + for dir in $perm_rpath; do + rpath="$rpath$dir:" + done + eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" + fi + test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" + fi + + shlibpath="$finalize_shlibpath" + test "$mode" != relink && shlibpath="$compile_shlibpath$shlibpath" + if test -n "$shlibpath"; then + eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" + fi + + # Get the real and link names of the library. + eval shared_ext=\"$shrext_cmds\" + eval library_names=\"$library_names_spec\" + set dummy $library_names + realname="$2" + shift; shift + + if test -n "$soname_spec"; then + eval soname=\"$soname_spec\" + else + soname="$realname" + fi + if test -z "$dlname"; then + dlname=$soname + fi + + lib="$output_objdir/$realname" + linknames= + for link + do + linknames="$linknames $link" + done + + # Use standard objects if they are pic + test -z "$pic_flag" && libobjs=`$echo "X$libobjs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then + $show "generating symbol list for \`$libname.la'" + export_symbols="$output_objdir/$libname.exp" + $run $rm $export_symbols + cmds=$export_symbols_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + if len=`expr "X$cmd" : ".*"` && + test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then + $show "$cmd" + $run eval "$cmd" || exit $? + skipped_export=false + else + # The command line is too long to execute in one step. + $show "using reloadable object file for export list..." + skipped_export=: + # Break out early, otherwise skipped_export may be + # set to false by a later but shorter cmd. + break + fi + done + IFS="$save_ifs" + if test -n "$export_symbols_regex"; then + $show "$EGREP -e \"$export_symbols_regex\" \"$export_symbols\" > \"${export_symbols}T\"" + $run eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + $show "$mv \"${export_symbols}T\" \"$export_symbols\"" + $run eval '$mv "${export_symbols}T" "$export_symbols"' + fi + fi + fi + + if test -n "$export_symbols" && test -n "$include_expsyms"; then + $run eval '$echo "X$include_expsyms" | $SP2NL >> "$export_symbols"' + fi + + tmp_deplibs= + for test_deplib in $deplibs; do + case " $convenience " in + *" $test_deplib "*) ;; + *) + tmp_deplibs="$tmp_deplibs $test_deplib" + ;; + esac + done + deplibs="$tmp_deplibs" + + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + else + gentop="$output_objdir/${outputname}x" + generated="$generated $gentop" + + func_extract_archives $gentop $convenience + libobjs="$libobjs $func_extract_archives_result" + fi + fi + + if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then + eval flag=\"$thread_safe_flag_spec\" + linker_flags="$linker_flags $flag" + fi + + # Make a backup of the uninstalled library when relinking + if test "$mode" = relink; then + $run eval '(cd $output_objdir && $rm ${realname}U && $mv $realname ${realname}U)' || exit $? + fi + + # Do each of the archive commands. 
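When only `-export-symbols-regex' was given, the full symbol list produced by $export_symbols_cmds is post-filtered with egrep into the .exp file that the link commands reference. Roughly, with an invented regex and the script's file-naming convention:

    # Keep only the requested symbols in libfoo.exp
    export_symbols=libfoo.exp
    export_symbols_regex='^sqlite3_'        # example regex
    egrep -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"
    mv "${export_symbols}T" "$export_symbols"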
+ if test "$module" = yes && test -n "$module_cmds" ; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + eval test_cmds=\"$module_expsym_cmds\" + cmds=$module_expsym_cmds + else + eval test_cmds=\"$module_cmds\" + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + eval test_cmds=\"$archive_expsym_cmds\" + cmds=$archive_expsym_cmds + else + eval test_cmds=\"$archive_cmds\" + cmds=$archive_cmds + fi + fi + + if test "X$skipped_export" != "X:" && + len=`expr "X$test_cmds" : ".*" 2>/dev/null` && + test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then + : + else + # The command line is too long to link in one step, link piecewise. + $echo "creating reloadable object files..." + + # Save the value of $output and $libobjs because we want to + # use them later. If we have whole_archive_flag_spec, we + # want to use save_libobjs as it was before + # whole_archive_flag_spec was expanded, because we can't + # assume the linker understands whole_archive_flag_spec. + # This may have to be revisited, in case too many + # convenience libraries get linked in and end up exceeding + # the spec. + if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + fi + save_output=$output + output_la=`$echo "X$output" | $Xsed -e "$basename"` + + # Clear the reloadable object creation command queue and + # initialize k to one. + test_cmds= + concat_cmds= + objlist= + delfiles= + last_robj= + k=1 + output=$output_objdir/$output_la-${k}.$objext + # Loop over the list of objects to be linked. + for obj in $save_libobjs + do + eval test_cmds=\"$reload_cmds $objlist $last_robj\" + if test "X$objlist" = X || + { len=`expr "X$test_cmds" : ".*" 2>/dev/null` && + test "$len" -le "$max_cmd_len"; }; then + objlist="$objlist $obj" + else + # The command $test_cmds is almost too long, add a + # command to the queue. + if test "$k" -eq 1 ; then + # The first file doesn't have a previous command to add. + eval concat_cmds=\"$reload_cmds $objlist $last_robj\" + else + # All subsequent reloadable object files will link in + # the last one created. + eval concat_cmds=\"\$concat_cmds~$reload_cmds $objlist $last_robj\" + fi + last_robj=$output_objdir/$output_la-${k}.$objext + k=`expr $k + 1` + output=$output_objdir/$output_la-${k}.$objext + objlist=$obj + len=1 + fi + done + # Handle the remaining objects by creating one last + # reloadable object file. All subsequent reloadable object + # files will link in the last one created. + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\${concat_cmds}$reload_cmds $objlist $last_robj\" + + if ${skipped_export-false}; then + $show "generating symbol list for \`$libname.la'" + export_symbols="$output_objdir/$libname.exp" + $run $rm $export_symbols + libobjs=$output + # Append the command to create the export file. + eval concat_cmds=\"\$concat_cmds~$export_symbols_cmds\" + fi + + # Set up a command to remove the reloadable object files + # after they are used. + i=0 + while test "$i" -lt "$k" + do + i=`expr $i + 1` + delfiles="$delfiles $output_objdir/$output_la-${i}.$objext" + done + + $echo "creating a temporary reloadable object file: $output" + + # Loop through the commands generated above and execute them. + save_ifs="$IFS"; IFS='~' + for cmd in $concat_cmds; do + IFS="$save_ifs" + $show "$cmd" + $run eval "$cmd" || exit $? + done + IFS="$save_ifs" + + libobjs=$output + # Restore the value of output. 
+ output=$save_output + + if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + fi + # Expand the library linking commands again to reset the + # value of $libobjs for piecewise linking. + + # Do each of the archive commands. + if test "$module" = yes && test -n "$module_cmds" ; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + cmds=$module_expsym_cmds + else + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + cmds=$archive_expsym_cmds + else + cmds=$archive_cmds + fi + fi + + # Append the command to remove the reloadable object files + # to the just-reset $cmds. + eval cmds=\"\$cmds~\$rm $delfiles\" + fi + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test "$mode" = relink; then + $run eval '(cd $output_objdir && $rm ${realname}T && $mv ${realname}U $realname)' + fi + + exit $lt_exit + } + done + IFS="$save_ifs" + + # Restore the uninstalled library and exit + if test "$mode" = relink; then + $run eval '(cd $output_objdir && $rm ${realname}T && $mv $realname ${realname}T && $mv "$realname"U $realname)' || exit $? + + if test -n "$convenience"; then + if test -z "$whole_archive_flag_spec"; then + $show "${rm}r $gentop" + $run ${rm}r "$gentop" + fi + fi + + exit $EXIT_SUCCESS + fi + + # Create links to the real library. + for linkname in $linknames; do + if test "$realname" != "$linkname"; then + $show "(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)" + $run eval '(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)' || exit $? + fi + done + + # If -module or -export-dynamic was specified, set the dlname. + if test "$module" = yes || test "$export_dynamic" = yes; then + # On all known operating systems, these are identical. + dlname="$soname" + fi + fi + ;; + + obj) + if test -n "$deplibs"; then + $echo "$modename: warning: \`-l' and \`-L' are ignored for objects" 1>&2 + fi + + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + $echo "$modename: warning: \`-dlopen' is ignored for objects" 1>&2 + fi + + if test -n "$rpath"; then + $echo "$modename: warning: \`-rpath' is ignored for objects" 1>&2 + fi + + if test -n "$xrpath"; then + $echo "$modename: warning: \`-R' is ignored for objects" 1>&2 + fi + + if test -n "$vinfo"; then + $echo "$modename: warning: \`-version-info' is ignored for objects" 1>&2 + fi + + if test -n "$release"; then + $echo "$modename: warning: \`-release' is ignored for objects" 1>&2 + fi + + case $output in + *.lo) + if test -n "$objs$old_deplibs"; then + $echo "$modename: cannot build library object \`$output' from non-libtool objects" 1>&2 + exit $EXIT_FAILURE + fi + libobj="$output" + obj=`$echo "X$output" | $Xsed -e "$lo2o"` + ;; + *) + libobj= + obj="$output" + ;; + esac + + # Delete the old objects. + $run $rm $obj $libobj + + # Objects from convenience libraries. This assumes + # single-version convenience libraries. Whenever we create + # different ones for PIC/non-PIC, this we'll have to duplicate + # the extraction. 
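The `linknames' loop above is what leaves the familiar chain of symlinks next to the real library in the output directory, for example (names illustrative, matching the earlier 7:3:2 example):

    # In $output_objdir, after the real library libfoo.so.5.2.3 has been created:
    rm -f libfoo.so.5 libfoo.so
    ln -s libfoo.so.5.2.3 libfoo.so.5
    ln -s libfoo.so.5.2.3 libfoo.so
    # ls -l now shows libfoo.so -> libfoo.so.5.2.3 and libfoo.so.5 -> libfoo.so.5.2.3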
+ reload_conv_objs= + gentop= + # reload_cmds runs $LD directly, so let us get rid of + # -Wl from whole_archive_flag_spec + wl= + + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec"; then + eval reload_conv_objs=\"\$reload_objs $whole_archive_flag_spec\" + else + gentop="$output_objdir/${obj}x" + generated="$generated $gentop" + + func_extract_archives $gentop $convenience + reload_conv_objs="$reload_objs $func_extract_archives_result" + fi + fi + + # Create the old-style object. + reload_objs="$objs$old_deplibs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}$'/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test + + output="$obj" + cmds=$reload_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || exit $? + done + IFS="$save_ifs" + + # Exit if we aren't doing a library object file. + if test -z "$libobj"; then + if test -n "$gentop"; then + $show "${rm}r $gentop" + $run ${rm}r $gentop + fi + + exit $EXIT_SUCCESS + fi + + if test "$build_libtool_libs" != yes; then + if test -n "$gentop"; then + $show "${rm}r $gentop" + $run ${rm}r $gentop + fi + + # Create an invalid libtool object if no PIC, so that we don't + # accidentally link it into a program. + # $show "echo timestamp > $libobj" + # $run eval "echo timestamp > $libobj" || exit $? + exit $EXIT_SUCCESS + fi + + if test -n "$pic_flag" || test "$pic_mode" != default; then + # Only do commands if we really have different PIC objects. + reload_objs="$libobjs $reload_conv_objs" + output="$libobj" + cmds=$reload_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || exit $? + done + IFS="$save_ifs" + fi + + if test -n "$gentop"; then + $show "${rm}r $gentop" + $run ${rm}r $gentop + fi + + exit $EXIT_SUCCESS + ;; + + prog) + case $host in + *cygwin*) output=`$echo $output | ${SED} -e 's,.exe$,,;s,$,.exe,'` ;; + esac + if test -n "$vinfo"; then + $echo "$modename: warning: \`-version-info' is ignored for programs" 1>&2 + fi + + if test -n "$release"; then + $echo "$modename: warning: \`-release' is ignored for programs" 1>&2 + fi + + if test "$preload" = yes; then + if test "$dlopen_support" = unknown && test "$dlopen_self" = unknown && + test "$dlopen_self_static" = unknown; then + $echo "$modename: warning: \`AC_LIBTOOL_DLOPEN' not used. Assuming no dlopen support." 
+ fi + fi + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library is the System framework + compile_deplibs=`$echo "X $compile_deplibs" | $Xsed -e 's/ -lc / -framework System /'` + finalize_deplibs=`$echo "X $finalize_deplibs" | $Xsed -e 's/ -lc / -framework System /'` + ;; + esac + + case $host in + *darwin*) + # Don't allow lazy linking, it breaks C++ global constructors + if test "$tagname" = CXX ; then + compile_command="$compile_command ${wl}-bind_at_load" + finalize_command="$finalize_command ${wl}-bind_at_load" + fi + ;; + esac + + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $compile_deplibs " in + *" -L$path/$objdir "*) + new_libs="$new_libs -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $compile_deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) new_libs="$new_libs $deplib" ;; + esac + ;; + *) new_libs="$new_libs $deplib" ;; + esac + done + compile_deplibs="$new_libs" + + + compile_command="$compile_command $compile_deplibs" + finalize_command="$finalize_command $finalize_deplibs" + + if test -n "$rpath$xrpath"; then + # If the user specified any rpath flags, then add them. + for libdir in $rpath $xrpath; do + # This is the magic to use -rpath. + case "$finalize_rpath " in + *" $libdir "*) ;; + *) finalize_rpath="$finalize_rpath $libdir" ;; + esac + done + fi + + # Now hardcode the library paths + rpath= + hardcode_libdirs= + for libdir in $compile_rpath $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + rpath="$rpath $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) perm_rpath="$perm_rpath $libdir" ;; + esac + fi + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*) + testbindir=`$echo "X$libdir" | $Xsed -e 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$libdir:"*) ;; + *) dllsearchpath="$dllsearchpath:$libdir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + *) dllsearchpath="$dllsearchpath:$testbindir";; + esac + ;; + esac + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + compile_rpath="$rpath" + + rpath= + hardcode_libdirs= + for libdir in $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. 
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + rpath="$rpath $flag" + fi + elif test -n "$runpath_var"; then + case "$finalize_perm_rpath " in + *" $libdir "*) ;; + *) finalize_perm_rpath="$finalize_perm_rpath $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + finalize_rpath="$rpath" + + if test -n "$libobjs" && test "$build_old_libs" = yes; then + # Transform all the library objects into standard objects. + compile_command=`$echo "X$compile_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` + finalize_command=`$echo "X$finalize_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` + fi + + dlsyms= + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + if test -n "$NM" && test -n "$global_symbol_pipe"; then + dlsyms="${outputname}S.c" + else + $echo "$modename: not configured to extract global symbols from dlpreopened files" 1>&2 + fi + fi + + if test -n "$dlsyms"; then + case $dlsyms in + "") ;; + *.c) + # Discover the nlist of each of the dlfiles. + nlist="$output_objdir/${outputname}.nm" + + $show "$rm $nlist ${nlist}S ${nlist}T" + $run $rm "$nlist" "${nlist}S" "${nlist}T" + + # Parse the name list into a source file. + $show "creating $output_objdir/$dlsyms" + + test -z "$run" && $echo > "$output_objdir/$dlsyms" "\ +/* $dlsyms - symbol resolution table for \`$outputname' dlsym emulation. */ +/* Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP */ + +#ifdef __cplusplus +extern \"C\" { +#endif + +/* Prevent the only kind of declaration conflicts we can make. */ +#define lt_preloaded_symbols some_other_symbol + +/* External symbol declarations for the compiler. */\ +" + + if test "$dlself" = yes; then + $show "generating symbol list for \`$output'" + + test -z "$run" && $echo ': @PROGRAM@ ' > "$nlist" + + # Add our own program objects to the symbol list. 
+ progfiles=`$echo "X$objs$old_deplibs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
+ for arg in $progfiles; do
+ $show "extracting global C symbols from \`$arg'"
+ $run eval "$NM $arg | $global_symbol_pipe >> '$nlist'"
+ done
+
+ if test -n "$exclude_expsyms"; then
+ $run eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T'
+ $run eval '$mv "$nlist"T "$nlist"'
+ fi
+
+ if test -n "$export_symbols_regex"; then
+ $run eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T'
+ $run eval '$mv "$nlist"T "$nlist"'
+ fi
+
+ # Prepare the list of exported symbols
+ if test -z "$export_symbols"; then
+ export_symbols="$output_objdir/$outputname.exp"
+ $run $rm $export_symbols
+ $run eval "${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"'
+ case $host in
+ *cygwin* | *mingw* )
+ $run eval "echo EXPORTS "'> "$output_objdir/$outputname.def"'
+ $run eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"'
+ ;;
+ esac
+ else
+ $run eval "${SED} -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"'
+ $run eval 'grep -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T'
+ $run eval 'mv "$nlist"T "$nlist"'
+ case $host in
+ *cygwin* | *mingw* )
+ $run eval "echo EXPORTS "'> "$output_objdir/$outputname.def"'
+ $run eval 'cat "$nlist" >> "$output_objdir/$outputname.def"'
+ ;;
+ esac
+ fi
+ fi
+
+ for arg in $dlprefiles; do
+ $show "extracting global C symbols from \`$arg'"
+ name=`$echo "$arg" | ${SED} -e 's%^.*/%%'`
+ $run eval '$echo ": $name " >> "$nlist"'
+ $run eval "$NM $arg | $global_symbol_pipe >> '$nlist'"
+ done
+
+ if test -z "$run"; then
+ # Make sure we have at least an empty file.
+ test -f "$nlist" || : > "$nlist"
+
+ if test -n "$exclude_expsyms"; then
+ $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T
+ $mv "$nlist"T "$nlist"
+ fi
+
+ # Try sorting and uniquifying the output.
+ if grep -v "^: " < "$nlist" |
+ if sort -k 3 </dev/null >/dev/null 2>&1; then
+ sort -k 3
+ else
+ sort +2
+ fi |
+ uniq > "$nlist"S; then
+ :
+ else
+ grep -v "^: " < "$nlist" > "$nlist"S
+ fi
+
+ if test -f "$nlist"S; then
+ eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$dlsyms"'
+ else
+ $echo '/* NONE */' >> "$output_objdir/$dlsyms"
+ fi
+
+ $echo >> "$output_objdir/$dlsyms" "\
+
+#undef lt_preloaded_symbols
+
+#if defined (__STDC__) && __STDC__
+# define lt_ptr void *
+#else
+# define lt_ptr char *
+# define const
+#endif
+
+/* The mapping between symbol names and symbols. */
+"
+
+ case $host in
+ *cygwin* | *mingw* )
+ $echo >> "$output_objdir/$dlsyms" "\
+/* DATA imports from DLLs on WIN32 can't be const, because
+ runtime relocations are performed -- see ld's documentation
+ on pseudo-relocs */
+struct {
+"
+ ;;
+ * )
+ $echo >> "$output_objdir/$dlsyms" "\
+const struct {
+"
+ ;;
+ esac
+
+
+ $echo >> "$output_objdir/$dlsyms" "\
+ const char *name;
+ lt_ptr address;
+}
+lt_preloaded_symbols[] =
+{\
+"
+
+ eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$dlsyms"
+
+ $echo >> "$output_objdir/$dlsyms" "\
+ {0, (lt_ptr) 0}
+};
+
+/* This works around a problem in FreeBSD linker */
+#ifdef FREEBSD_WORKAROUND
+static const void *lt_preloaded_setup() {
+ return lt_preloaded_symbols;
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif\
+"
+ fi
+
+ pic_flag_for_symtable=
+ case $host in
+ # compiling the symbol table file with pic_flag works around
+ # a FreeBSD bug that causes programs to crash when -lm is
+ # linked before any other PIC object. 
But we must not use + # pic_flag when linking with -static. The problem exists in + # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1. + *-*-freebsd2*|*-*-freebsd3.0*|*-*-freebsdelf3.0*) + case "$compile_command " in + *" -static "*) ;; + *) pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND";; + esac;; + *-*-hpux*) + case "$compile_command " in + *" -static "*) ;; + *) pic_flag_for_symtable=" $pic_flag";; + esac + esac + + # Now compile the dynamic symbol file. + $show "(cd $output_objdir && $LTCC $LTCFLAGS -c$no_builtin_flag$pic_flag_for_symtable \"$dlsyms\")" + $run eval '(cd $output_objdir && $LTCC $LTCFLAGS -c$no_builtin_flag$pic_flag_for_symtable "$dlsyms")' || exit $? + + # Clean up the generated files. + $show "$rm $output_objdir/$dlsyms $nlist ${nlist}S ${nlist}T" + $run $rm "$output_objdir/$dlsyms" "$nlist" "${nlist}S" "${nlist}T" + + # Transform the symbol file into the correct name. + case $host in + *cygwin* | *mingw* ) + if test -f "$output_objdir/${outputname}.def" ; then + compile_command=`$echo "X$compile_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}.def $output_objdir/${outputname}S.${objext}%"` + finalize_command=`$echo "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}.def $output_objdir/${outputname}S.${objext}%"` + else + compile_command=`$echo "X$compile_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"` + finalize_command=`$echo "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"` + fi + ;; + * ) + compile_command=`$echo "X$compile_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"` + finalize_command=`$echo "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"` + ;; + esac + ;; + *) + $echo "$modename: unknown suffix for \`$dlsyms'" 1>&2 + exit $EXIT_FAILURE + ;; + esac + else + # We keep going just in case the user didn't refer to + # lt_preloaded_symbols. The linker will fail if global_symbol_pipe + # really was required. + + # Nullify the symbol file. + compile_command=`$echo "X$compile_command" | $Xsed -e "s% @SYMFILE@%%"` + finalize_command=`$echo "X$finalize_command" | $Xsed -e "s% @SYMFILE@%%"` + fi + + if test "$need_relink" = no || test "$build_libtool_libs" != yes; then + # Replace the output file specification. + compile_command=`$echo "X$compile_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'` + link_command="$compile_command$compile_rpath" + + # We have no uninstalled library dependencies, so finalize right now. + $show "$link_command" + $run eval "$link_command" + exit_status=$? + + # Delete the generated files. + if test -n "$dlsyms"; then + $show "$rm $output_objdir/${outputname}S.${objext}" + $run $rm "$output_objdir/${outputname}S.${objext}" + fi + + exit $exit_status + fi + + if test -n "$shlibpath_var"; then + # We should set the shlibpath_var + rpath= + for dir in $temp_rpath; do + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) + # Absolute path. + rpath="$rpath$dir:" + ;; + *) + # Relative path: add a thisdir entry. 
+ rpath="$rpath\$thisdir/$dir:" + ;; + esac + done + temp_rpath="$rpath" + fi + + if test -n "$compile_shlibpath$finalize_shlibpath"; then + compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" + fi + if test -n "$finalize_shlibpath"; then + finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" + fi + + compile_var= + finalize_var= + if test -n "$runpath_var"; then + if test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + rpath="$rpath$dir:" + done + compile_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + if test -n "$finalize_perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $finalize_perm_rpath; do + rpath="$rpath$dir:" + done + finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + fi + + if test "$no_install" = yes; then + # We don't need to create a wrapper script. + link_command="$compile_var$compile_command$compile_rpath" + # Replace the output file specification. + link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'` + # Delete the old output file. + $run $rm $output + # Link the executable and exit + $show "$link_command" + $run eval "$link_command" || exit $? + exit $EXIT_SUCCESS + fi + + if test "$hardcode_action" = relink; then + # Fast installation is not supported + link_command="$compile_var$compile_command$compile_rpath" + relink_command="$finalize_var$finalize_command$finalize_rpath" + + $echo "$modename: warning: this platform does not like uninstalled shared libraries" 1>&2 + $echo "$modename: \`$output' will be relinked during installation" 1>&2 + else + if test "$fast_install" != no; then + link_command="$finalize_var$compile_command$finalize_rpath" + if test "$fast_install" = yes; then + relink_command=`$echo "X$compile_var$compile_command$compile_rpath" | $Xsed -e 's%@OUTPUT@%\$progdir/\$file%g'` + else + # fast_install is set to needless + relink_command= + fi + else + link_command="$compile_var$compile_command$compile_rpath" + relink_command="$finalize_var$finalize_command$finalize_rpath" + fi + fi + + # Replace the output file specification. + link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` + + # Delete the old output files. + $run $rm $output $output_objdir/$outputname $output_objdir/lt-$outputname + + $show "$link_command" + $run eval "$link_command" || exit $? + + # Now create the wrapper script. + $show "creating $output" + + # Quote the relink command for shipping. + if test -n "$relink_command"; then + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"` + relink_command="$var=\"$var_value\"; export $var; $relink_command" + fi + done + relink_command="(cd `pwd`; $relink_command)" + relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"` + fi + + # Quote $echo for shipping. 
+ if test "X$echo" = "X$SHELL $progpath --fallback-echo"; then + case $progpath in + [\\/]* | [A-Za-z]:[\\/]*) qecho="$SHELL $progpath --fallback-echo";; + *) qecho="$SHELL `pwd`/$progpath --fallback-echo";; + esac + qecho=`$echo "X$qecho" | $Xsed -e "$sed_quote_subst"` + else + qecho=`$echo "X$echo" | $Xsed -e "$sed_quote_subst"` + fi + + # Only actually do things if our run command is non-null. + if test -z "$run"; then + # win32 will think the script is a binary if it has + # a .exe suffix, so we strip it off here. + case $output in + *.exe) output=`$echo $output|${SED} 's,.exe$,,'` ;; + esac + # test for cygwin because mv fails w/o .exe extensions + case $host in + *cygwin*) + exeext=.exe + outputname=`$echo $outputname|${SED} 's,.exe$,,'` ;; + *) exeext= ;; + esac + case $host in + *cygwin* | *mingw* ) + output_name=`basename $output` + output_path=`dirname $output` + cwrappersource="$output_path/$objdir/lt-$output_name.c" + cwrapper="$output_path/$output_name.exe" + $rm $cwrappersource $cwrapper + trap "$rm $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 + + cat > $cwrappersource <> $cwrappersource<<"EOF" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(PATH_MAX) +# define LT_PATHMAX PATH_MAX +#elif defined(MAXPATHLEN) +# define LT_PATHMAX MAXPATHLEN +#else +# define LT_PATHMAX 1024 +#endif + +#ifndef DIR_SEPARATOR +# define DIR_SEPARATOR '/' +# define PATH_SEPARATOR ':' +#endif + +#if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \ + defined (__OS2__) +# define HAVE_DOS_BASED_FILE_SYSTEM +# ifndef DIR_SEPARATOR_2 +# define DIR_SEPARATOR_2 '\\' +# endif +# ifndef PATH_SEPARATOR_2 +# define PATH_SEPARATOR_2 ';' +# endif +#endif + +#ifndef DIR_SEPARATOR_2 +# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) +#else /* DIR_SEPARATOR_2 */ +# define IS_DIR_SEPARATOR(ch) \ + (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) +#endif /* DIR_SEPARATOR_2 */ + +#ifndef PATH_SEPARATOR_2 +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR) +#else /* PATH_SEPARATOR_2 */ +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2) +#endif /* PATH_SEPARATOR_2 */ + +#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type))) +#define XFREE(stale) do { \ + if (stale) { free ((void *) stale); stale = 0; } \ +} while (0) + +/* -DDEBUG is fairly common in CFLAGS. */ +#undef DEBUG +#if defined DEBUGWRAPPER +# define DEBUG(format, ...) fprintf(stderr, format, __VA_ARGS__) +#else +# define DEBUG(format, ...) 
+#endif + +const char *program_name = NULL; + +void * xmalloc (size_t num); +char * xstrdup (const char *string); +const char * base_name (const char *name); +char * find_executable(const char *wrapper); +int check_executable(const char *path); +char * strendzap(char *str, const char *pat); +void lt_fatal (const char *message, ...); + +int +main (int argc, char *argv[]) +{ + char **newargz; + int i; + + program_name = (char *) xstrdup (base_name (argv[0])); + DEBUG("(main) argv[0] : %s\n",argv[0]); + DEBUG("(main) program_name : %s\n",program_name); + newargz = XMALLOC(char *, argc+2); +EOF + + cat >> $cwrappersource <> $cwrappersource <<"EOF" + newargz[1] = find_executable(argv[0]); + if (newargz[1] == NULL) + lt_fatal("Couldn't find %s", argv[0]); + DEBUG("(main) found exe at : %s\n",newargz[1]); + /* we know the script has the same name, without the .exe */ + /* so make sure newargz[1] doesn't end in .exe */ + strendzap(newargz[1],".exe"); + for (i = 1; i < argc; i++) + newargz[i+1] = xstrdup(argv[i]); + newargz[argc+1] = NULL; + + for (i=0; i> $cwrappersource <> $cwrappersource <> $cwrappersource <<"EOF" + return 127; +} + +void * +xmalloc (size_t num) +{ + void * p = (void *) malloc (num); + if (!p) + lt_fatal ("Memory exhausted"); + + return p; +} + +char * +xstrdup (const char *string) +{ + return string ? strcpy ((char *) xmalloc (strlen (string) + 1), string) : NULL +; +} + +const char * +base_name (const char *name) +{ + const char *base; + +#if defined (HAVE_DOS_BASED_FILE_SYSTEM) + /* Skip over the disk name in MSDOS pathnames. */ + if (isalpha ((unsigned char)name[0]) && name[1] == ':') + name += 2; +#endif + + for (base = name; *name; name++) + if (IS_DIR_SEPARATOR (*name)) + base = name + 1; + return base; +} + +int +check_executable(const char * path) +{ + struct stat st; + + DEBUG("(check_executable) : %s\n", path ? (*path ? path : "EMPTY!") : "NULL!"); + if ((!path) || (!*path)) + return 0; + + if ((stat (path, &st) >= 0) && + ( + /* MinGW & native WIN32 do not support S_IXOTH or S_IXGRP */ +#if defined (S_IXOTH) + ((st.st_mode & S_IXOTH) == S_IXOTH) || +#endif +#if defined (S_IXGRP) + ((st.st_mode & S_IXGRP) == S_IXGRP) || +#endif + ((st.st_mode & S_IXUSR) == S_IXUSR)) + ) + return 1; + else + return 0; +} + +/* Searches for the full path of the wrapper. Returns + newly allocated full path name if found, NULL otherwise */ +char * +find_executable (const char* wrapper) +{ + int has_slash = 0; + const char* p; + const char* p_next; + /* static buffer for getcwd */ + char tmp[LT_PATHMAX + 1]; + int tmp_len; + char* concat_name; + + DEBUG("(find_executable) : %s\n", wrapper ? (*wrapper ? wrapper : "EMPTY!") : "NULL!"); + + if ((wrapper == NULL) || (*wrapper == '\0')) + return NULL; + + /* Absolute path? 
*/ +#if defined (HAVE_DOS_BASED_FILE_SYSTEM) + if (isalpha ((unsigned char)wrapper[0]) && wrapper[1] == ':') + { + concat_name = xstrdup (wrapper); + if (check_executable(concat_name)) + return concat_name; + XFREE(concat_name); + } + else + { +#endif + if (IS_DIR_SEPARATOR (wrapper[0])) + { + concat_name = xstrdup (wrapper); + if (check_executable(concat_name)) + return concat_name; + XFREE(concat_name); + } +#if defined (HAVE_DOS_BASED_FILE_SYSTEM) + } +#endif + + for (p = wrapper; *p; p++) + if (*p == '/') + { + has_slash = 1; + break; + } + if (!has_slash) + { + /* no slashes; search PATH */ + const char* path = getenv ("PATH"); + if (path != NULL) + { + for (p = path; *p; p = p_next) + { + const char* q; + size_t p_len; + for (q = p; *q; q++) + if (IS_PATH_SEPARATOR(*q)) + break; + p_len = q - p; + p_next = (*q == '\0' ? q : q + 1); + if (p_len == 0) + { + /* empty path: current directory */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal ("getcwd failed"); + tmp_len = strlen(tmp); + concat_name = XMALLOC(char, tmp_len + 1 + strlen(wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + } + else + { + concat_name = XMALLOC(char, p_len + 1 + strlen(wrapper) + 1); + memcpy (concat_name, p, p_len); + concat_name[p_len] = '/'; + strcpy (concat_name + p_len + 1, wrapper); + } + if (check_executable(concat_name)) + return concat_name; + XFREE(concat_name); + } + } + /* not found in PATH; assume curdir */ + } + /* Relative path | not found in path: prepend cwd */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal ("getcwd failed"); + tmp_len = strlen(tmp); + concat_name = XMALLOC(char, tmp_len + 1 + strlen(wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + + if (check_executable(concat_name)) + return concat_name; + XFREE(concat_name); + return NULL; +} + +char * +strendzap(char *str, const char *pat) +{ + size_t len, patlen; + + assert(str != NULL); + assert(pat != NULL); + + len = strlen(str); + patlen = strlen(pat); + + if (patlen <= len) + { + str += len - patlen; + if (strcmp(str, pat) == 0) + *str = '\0'; + } + return str; +} + +static void +lt_error_core (int exit_status, const char * mode, + const char * message, va_list ap) +{ + fprintf (stderr, "%s: %s: ", program_name, mode); + vfprintf (stderr, message, ap); + fprintf (stderr, ".\n"); + + if (exit_status >= 0) + exit (exit_status); +} + +void +lt_fatal (const char *message, ...) +{ + va_list ap; + va_start (ap, message); + lt_error_core (EXIT_FAILURE, "FATAL", message, ap); + va_end (ap); +} +EOF + # we should really use a build-platform specific compiler + # here, but OTOH, the wrappers (shell script and this C one) + # are only useful if you want to execute the "real" binary. + # Since the "real" binary is built for $host, then this + # wrapper might as well be built for $host, too. + $run $LTCC $LTCFLAGS -s -o $cwrapper $cwrappersource + ;; + esac + $rm $output + trap "$rm $output; exit $EXIT_FAILURE" 1 2 15 + + $echo > $output "\ +#! $SHELL + +# $output - temporary wrapper script for $objdir/$outputname +# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP +# +# The $output program cannot be directly executed until all the libtool +# libraries that it depends on are installed. +# +# This wrapper script should never be moved out of the build directory. +# If it is, it will not operate correctly. + +# Sed substitution that helps us do robust quoting. 
It backslashifies +# metacharacters that are still active within double-quoted strings. +Xsed='${SED} -e 1s/^X//' +sed_quote_subst='$sed_quote_subst' + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +relink_command=\"$relink_command\" + +# This environment variable determines our operation mode. +if test \"\$libtool_install_magic\" = \"$magic\"; then + # install mode needs the following variable: + notinst_deplibs='$notinst_deplibs' +else + # When we are sourced in execute mode, \$file and \$echo are already set. + if test \"\$libtool_execute_magic\" != \"$magic\"; then + echo=\"$qecho\" + file=\"\$0\" + # Make sure echo works. + if test \"X\$1\" = X--no-reexec; then + # Discard the --no-reexec flag, and continue. + shift + elif test \"X\`(\$echo '\t') 2>/dev/null\`\" = 'X\t'; then + # Yippee, \$echo works! + : + else + # Restart under the correct shell, and then maybe \$echo will work. + exec $SHELL \"\$0\" --no-reexec \${1+\"\$@\"} + fi + fi\ +" + $echo >> $output "\ + + # Find the directory that this script lives in. + thisdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*$%%'\` + test \"x\$thisdir\" = \"x\$file\" && thisdir=. + + # Follow symbolic links until we get to the real thisdir. + file=\`ls -ld \"\$file\" | ${SED} -n 's/.*-> //p'\` + while test -n \"\$file\"; do + destdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*\$%%'\` + + # If there was a directory component, then change thisdir. + if test \"x\$destdir\" != \"x\$file\"; then + case \"\$destdir\" in + [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; + *) thisdir=\"\$thisdir/\$destdir\" ;; + esac + fi + + file=\`\$echo \"X\$file\" | \$Xsed -e 's%^.*/%%'\` + file=\`ls -ld \"\$thisdir/\$file\" | ${SED} -n 's/.*-> //p'\` + done + + # Try to get the absolute directory name. + absdir=\`cd \"\$thisdir\" && pwd\` + test -n \"\$absdir\" && thisdir=\"\$absdir\" +" + + if test "$fast_install" = yes; then + $echo >> $output "\ + program=lt-'$outputname'$exeext + progdir=\"\$thisdir/$objdir\" + + if test ! -f \"\$progdir/\$program\" || \\ + { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\ + test \"X\$file\" != \"X\$progdir/\$program\"; }; then + + file=\"\$\$-\$program\" + + if test ! -d \"\$progdir\"; then + $mkdir \"\$progdir\" + else + $rm \"\$progdir/\$file\" + fi" + + $echo >> $output "\ + + # relink executable if necessary + if test -n \"\$relink_command\"; then + if relink_command_output=\`eval \$relink_command 2>&1\`; then : + else + $echo \"\$relink_command_output\" >&2 + $rm \"\$progdir/\$file\" + exit $EXIT_FAILURE + fi + fi + + $mv \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || + { $rm \"\$progdir/\$program\"; + $mv \"\$progdir/\$file\" \"\$progdir/\$program\"; } + $rm \"\$progdir/\$file\" + fi" + else + $echo >> $output "\ + program='$outputname' + progdir=\"\$thisdir/$objdir\" +" + fi + + $echo >> $output "\ + + if test -f \"\$progdir/\$program\"; then" + + # Export our shlibpath_var if we have one. 
+ if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then + $echo >> $output "\ + # Add our own library path to $shlibpath_var + $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" + + # Some systems cannot cope with colon-terminated $shlibpath_var + # The second colon is a workaround for a bug in BeOS R4 sed + $shlibpath_var=\`\$echo \"X\$$shlibpath_var\" | \$Xsed -e 's/::*\$//'\` + + export $shlibpath_var +" + fi + + # fixup the dll searchpath if we need to. + if test -n "$dllsearchpath"; then + $echo >> $output "\ + # Add the dll search path components to the executable PATH + PATH=$dllsearchpath:\$PATH +" + fi + + $echo >> $output "\ + if test \"\$libtool_execute_magic\" != \"$magic\"; then + # Run the actual program with our arguments. + + # Make sure env LD_LIBRARY_PATH does not mess us up + if test -n \"\${LD_LIBRARY_PATH+set}\"; then + export LD_LIBRARY_PATH=\$progdir:\$LD_LIBRARY_PATH + fi +" + case $host in + # Backslashes separate directories on plain windows + *-*-mingw | *-*-os2*) + $echo >> $output "\ + exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} +" + ;; + + *) + $echo >> $output "\ + exec \"\$progdir/\$program\" \${1+\"\$@\"} +" + ;; + esac + $echo >> $output "\ + \$echo \"\$0: cannot exec \$program \${1+\"\$@\"}\" + exit $EXIT_FAILURE + fi + else + # The program doesn't exist. + \$echo \"\$0: error: \\\`\$progdir/\$program' does not exist\" 1>&2 + \$echo \"This script is just a wrapper for \$program.\" 1>&2 + $echo \"See the $PACKAGE documentation for more information.\" 1>&2 + exit $EXIT_FAILURE + fi +fi\ +" + chmod +x $output + fi + exit $EXIT_SUCCESS + ;; + esac + + # See if we need to build an old-fashioned archive. + for oldlib in $oldlibs; do + + if test "$build_libtool_libs" = convenience; then + oldobjs="$libobjs_save" + addlibs="$convenience" + build_libtool_libs=no + else + if test "$build_libtool_libs" = module; then + oldobjs="$libobjs_save" + build_libtool_libs=no + else + oldobjs="$old_deplibs $non_pic_objects" + fi + addlibs="$old_convenience" + fi + + if test -n "$addlibs"; then + gentop="$output_objdir/${outputname}x" + generated="$generated $gentop" + + func_extract_archives $gentop $addlibs + oldobjs="$oldobjs $func_extract_archives_result" + fi + + # Do each command in the archive commands. + if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then + cmds=$old_archive_from_new_cmds + else + # POSIX demands no paths to be encoded in archives. We have + # to avoid creating archives with duplicate basenames if we + # might have to extract them afterwards, e.g., when creating a + # static archive out of a convenience library, or when linking + # the entirety of a libtool archive into another (currently + # not supported by libtool). + if (for obj in $oldobjs + do + $echo "X$obj" | $Xsed -e 's%^.*/%%' + done | sort | sort -uc >/dev/null 2>&1); then + : + else + $echo "copying selected object files to avoid basename conflicts..." + + if test -z "$gentop"; then + gentop="$output_objdir/${outputname}x" + generated="$generated $gentop" + + $show "${rm}r $gentop" + $run ${rm}r "$gentop" + $show "$mkdir $gentop" + $run $mkdir "$gentop" + exit_status=$? + if test "$exit_status" -ne 0 && test ! 
-d "$gentop"; then + exit $exit_status + fi + fi + + save_oldobjs=$oldobjs + oldobjs= + counter=1 + for obj in $save_oldobjs + do + objbase=`$echo "X$obj" | $Xsed -e 's%^.*/%%'` + case " $oldobjs " in + " ") oldobjs=$obj ;; + *[\ /]"$objbase "*) + while :; do + # Make sure we don't pick an alternate name that also + # overlaps. + newobj=lt$counter-$objbase + counter=`expr $counter + 1` + case " $oldobjs " in + *[\ /]"$newobj "*) ;; + *) if test ! -f "$gentop/$newobj"; then break; fi ;; + esac + done + $show "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" + $run ln "$obj" "$gentop/$newobj" || + $run cp "$obj" "$gentop/$newobj" + oldobjs="$oldobjs $gentop/$newobj" + ;; + *) oldobjs="$oldobjs $obj" ;; + esac + done + fi + + eval cmds=\"$old_archive_cmds\" + + if len=`expr "X$cmds" : ".*"` && + test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then + cmds=$old_archive_cmds + else + # the command line is too long to link in one step, link in parts + $echo "using piecewise archive linking..." + save_RANLIB=$RANLIB + RANLIB=: + objlist= + concat_cmds= + save_oldobjs=$oldobjs + + # Is there a better way of finding the last object in the list? + for obj in $save_oldobjs + do + last_oldobj=$obj + done + for obj in $save_oldobjs + do + oldobjs="$objlist $obj" + objlist="$objlist $obj" + eval test_cmds=\"$old_archive_cmds\" + if len=`expr "X$test_cmds" : ".*" 2>/dev/null` && + test "$len" -le "$max_cmd_len"; then + : + else + # the above command should be used before it gets too long + oldobjs=$objlist + if test "$obj" = "$last_oldobj" ; then + RANLIB=$save_RANLIB + fi + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\" + objlist= + fi + done + RANLIB=$save_RANLIB + oldobjs=$objlist + if test "X$oldobjs" = "X" ; then + eval cmds=\"\$concat_cmds\" + else + eval cmds=\"\$concat_cmds~\$old_archive_cmds\" + fi + fi + fi + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + eval cmd=\"$cmd\" + IFS="$save_ifs" + $show "$cmd" + $run eval "$cmd" || exit $? + done + IFS="$save_ifs" + done + + if test -n "$generated"; then + $show "${rm}r$generated" + $run ${rm}r$generated + fi + + # Now create the libtool archive. + case $output in + *.la) + old_library= + test "$build_old_libs" = yes && old_library="$libname.$libext" + $show "creating $output" + + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"` + relink_command="$var=\"$var_value\"; export $var; $relink_command" + fi + done + # Quote the link command for shipping. + relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" + relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"` + if test "$hardcode_automatic" = yes ; then + relink_command= + fi + + + # Only create the output if not a dry run. 
+ if test -z "$run"; then + for installed in no yes; do + if test "$installed" = yes; then + if test -z "$install_libdir"; then + break + fi + output="$output_objdir/$outputname"i + # Replace all uninstalled libtool libraries with the installed ones + newdependency_libs= + for deplib in $dependency_libs; do + case $deplib in + *.la) + name=`$echo "X$deplib" | $Xsed -e 's%^.*/%%'` + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` + if test -z "$libdir"; then + $echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2 + exit $EXIT_FAILURE + fi + if test "X$EGREP" = X ; then + EGREP=egrep + fi + # We do not want portage's install root ($D) present. Check only for + # this if the .la is being installed. + if test "$installed" = yes && test "$D"; then + eval mynewdependency_lib=`echo "$libdir/$name" |sed -e "s:$D:/:g" -e 's:/\+:/:g'` + else + mynewdependency_lib="$libdir/$name" + fi + # Do not add duplicates + if test "$mynewdependency_lib"; then + my_little_ninja_foo_1=`echo $newdependency_libs |$EGREP -e "$mynewdependency_lib"` + if test -z "$my_little_ninja_foo_1"; then + newdependency_libs="$newdependency_libs $mynewdependency_lib" + fi + fi + ;; + *) + if test "$installed" = yes; then + # Rather use S=WORKDIR if our version of portage supports it. + # This is because some ebuild (gcc) do not use $S as buildroot. + if test "$PWORKDIR"; then + S="$PWORKDIR" + fi + # We do not want portage's build root ($S) present. + my_little_ninja_foo_2=`echo $deplib |$EGREP -e "$S"` + # We do not want portage's install root ($D) present. + my_little_ninja_foo_3=`echo $deplib |$EGREP -e "$D"` + if test -n "$my_little_ninja_foo_2" && test "$S"; then + mynewdependency_lib="" + elif test -n "$my_little_ninja_foo_3" && test "$D"; then + eval mynewdependency_lib=`echo "$deplib" |sed -e "s:$D:/:g" -e 's:/\+:/:g'` + else + mynewdependency_lib="$deplib" + fi + else + mynewdependency_lib="$deplib" + fi + # Do not add duplicates + if test "$mynewdependency_lib"; then + my_little_ninja_foo_4=`echo $newdependency_libs |$EGREP -e "$mynewdependency_lib"` + if test -z "$my_little_ninja_foo_4"; then + newdependency_libs="$newdependency_libs $mynewdependency_lib" + fi + fi + ;; + esac + done + dependency_libs="$newdependency_libs" + newdlfiles= + for lib in $dlfiles; do + name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'` + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + if test -z "$libdir"; then + $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 + exit $EXIT_FAILURE + fi + newdlfiles="$newdlfiles $libdir/$name" + done + dlfiles="$newdlfiles" + newdlprefiles= + for lib in $dlprefiles; do + name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'` + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + if test -z "$libdir"; then + $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 + exit $EXIT_FAILURE + fi + newdlprefiles="$newdlprefiles $libdir/$name" + done + dlprefiles="$newdlprefiles" + else + newdlfiles= + for lib in $dlfiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; + *) abs=`pwd`"/$lib" ;; + esac + newdlfiles="$newdlfiles $abs" + done + dlfiles="$newdlfiles" + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; + *) abs=`pwd`"/$lib" ;; + esac + newdlprefiles="$newdlprefiles $abs" + done + dlprefiles="$newdlprefiles" + fi + $rm $output + # place dlname in correct position for cygwin + tdlname=$dlname + case $host,$output,$installed,$module,$dlname in + *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll) 
tdlname=../bin/$dlname ;; + esac + # Do not add duplicates + if test "$installed" = yes && test "$D"; then + install_libdir=`echo "$install_libdir" |sed -e "s:$D:/:g" -e 's:/\+:/:g'` + fi + $echo > $output "\ +# $outputname - a libtool library file +# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# The name that we can dlopen(3). +dlname='$tdlname' + +# Names of this library. +library_names='$library_names' + +# The name of the static archive. +old_library='$old_library' + +# Libraries that this one depends upon. +dependency_libs='$dependency_libs' + +# Version information for $libname. +current=$current +age=$age +revision=$revision + +# Is this an already installed library? +installed=$installed + +# Should we warn about portability when linking against -modules? +shouldnotlink=$module + +# Files to dlopen/dlpreopen +dlopen='$dlfiles' +dlpreopen='$dlprefiles' + +# Directory that this library needs to be installed in: +libdir='$install_libdir'" + if test "$installed" = no && test "$need_relink" = yes; then + $echo >> $output "\ +relink_command=\"$relink_command\"" + fi + done + fi + + # Do a symbolic link so that the libtool archive can be found in + # LD_LIBRARY_PATH before the program is installed. + $show "(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)" + $run eval '(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)' || exit $? + ;; + esac + exit $EXIT_SUCCESS + ;; + + # libtool install mode + install) + modename="$modename: install" + + # There may be an optional sh(1) argument at the beginning of + # install_prog (especially on Windows NT). + if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh || + # Allow the use of GNU shtool's install command. + $echo "X$nonopt" | grep shtool > /dev/null; then + # Aesthetically quote it. + arg=`$echo "X$nonopt" | $Xsed -e "$sed_quote_subst"` + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + install_prog="$arg " + arg="$1" + shift + else + install_prog= + arg=$nonopt + fi + + # The real first argument should be the name of the installation program. + # Aesthetically quote it. + arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + install_prog="$install_prog$arg" + + # We need to accept at least all the BSD install flags. + dest= + files= + opts= + prev= + install_type= + isdir=no + stripme= + for arg + do + if test -n "$dest"; then + files="$files $dest" + dest=$arg + continue + fi + + case $arg in + -d) isdir=yes ;; + -f) + case " $install_prog " in + *[\\\ /]cp\ *) ;; + *) prev=$arg ;; + esac + ;; + -g | -m | -o) prev=$arg ;; + -s) + stripme=" -s" + continue + ;; + -*) + ;; + *) + # If the previous option needed an argument, then skip it. + if test -n "$prev"; then + prev= + else + dest=$arg + continue + fi + ;; + esac + + # Aesthetically quote the argument. 
+ arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + install_prog="$install_prog $arg" + done + + if test -z "$install_prog"; then + $echo "$modename: you must specify an install program" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + if test -n "$prev"; then + $echo "$modename: the \`$prev' option requires an argument" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + if test -z "$files"; then + if test -z "$dest"; then + $echo "$modename: no file or destination specified" 1>&2 + else + $echo "$modename: you must specify a destination" 1>&2 + fi + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # Strip any trailing slash from the destination. + dest=`$echo "X$dest" | $Xsed -e 's%/$%%'` + + # Check to see that the destination is a directory. + test -d "$dest" && isdir=yes + if test "$isdir" = yes; then + destdir="$dest" + destname= + else + destdir=`$echo "X$dest" | $Xsed -e 's%/[^/]*$%%'` + test "X$destdir" = "X$dest" && destdir=. + destname=`$echo "X$dest" | $Xsed -e 's%^.*/%%'` + + # Not a directory, so check to see that there is only one file specified. + set dummy $files + if test "$#" -gt 2; then + $echo "$modename: \`$dest' is not a directory" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + fi + case $destdir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + for file in $files; do + case $file in + *.lo) ;; + *) + $echo "$modename: \`$destdir' must be an absolute directory name" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + ;; + esac + done + ;; + esac + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic="$magic" + + staticlibs= + future_libdirs= + current_libdirs= + for file in $files; do + + # Do each installation. + case $file in + *.$libext) + # Do the static libraries later. + staticlibs="$staticlibs $file" + ;; + + *.la) + # Check to see that this really is a libtool archive. + if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then : + else + $echo "$modename: \`$file' is not a valid libtool archive" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + library_names= + old_library= + relink_command= + # If there is no directory component, then add one. + case $file in + */* | *\\*) . $file ;; + *) . ./$file ;; + esac + + # Add the libdir to current_libdirs if it is the destination. + if test "X$destdir" = "X$libdir"; then + case "$current_libdirs " in + *" $libdir "*) ;; + *) current_libdirs="$current_libdirs $libdir" ;; + esac + else + # Note the libdir as a future libdir. + case "$future_libdirs " in + *" $libdir "*) ;; + *) future_libdirs="$future_libdirs $libdir" ;; + esac + fi + + dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`/ + test "X$dir" = "X$file/" && dir= + dir="$dir$objdir" + + if test -n "$relink_command"; then + # Determine the prefix the user has applied to our future dir. + inst_prefix_dir=`$echo "$destdir" | $SED "s%$libdir\$%%"` + + # Don't allow the user to place us outside of our expected + # location b/c this prevents finding dependent libraries that + # are installed to the same prefix. + # At present, this check doesn't affect windows .dll's that + # are installed into $libdir/../bin (currently, that works fine) + # but it's something to keep an eye on. 
+ if test "$inst_prefix_dir" = "$destdir"; then + $echo "$modename: error: cannot install \`$file' to a directory not ending in $libdir" 1>&2 + exit $EXIT_FAILURE + fi + + if test -n "$inst_prefix_dir"; then + # Stick the inst_prefix_dir data into the link command. + relink_command=`$echo "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"` + else + relink_command=`$echo "$relink_command" | $SED "s%@inst_prefix_dir@%%"` + fi + + $echo "$modename: warning: relinking \`$file'" 1>&2 + $show "$relink_command" + if $run eval "$relink_command"; then : + else + $echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2 + exit $EXIT_FAILURE + fi + fi + + # See the names of the shared library. + set dummy $library_names + if test -n "$2"; then + realname="$2" + shift + shift + + srcname="$realname" + test -n "$relink_command" && srcname="$realname"T + + # Install the shared library and build the symlinks. + $show "$install_prog $dir/$srcname $destdir/$realname" + $run eval "$install_prog $dir/$srcname $destdir/$realname" || exit $? + if test -n "$stripme" && test -n "$striplib"; then + $show "$striplib $destdir/$realname" + $run eval "$striplib $destdir/$realname" || exit $? + fi + + if test "$#" -gt 0; then + # Delete the old symlinks, and create new ones. + # Try `ln -sf' first, because the `ln' binary might depend on + # the symlink we replace! Solaris /bin/ln does not understand -f, + # so we also need to try rm && ln -s. + for linkname + do + if test "$linkname" != "$realname"; then + $show "(cd $destdir && { $LN_S -f $realname $linkname || { $rm $linkname && $LN_S $realname $linkname; }; })" + $run eval "(cd $destdir && { $LN_S -f $realname $linkname || { $rm $linkname && $LN_S $realname $linkname; }; })" + fi + done + fi + + # Do each command in the postinstall commands. + lib="$destdir/$realname" + cmds=$postinstall_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test "$mode" = relink; then + $run eval '(cd $output_objdir && $rm ${realname}T && $mv ${realname}U $realname)' + fi + + exit $lt_exit + } + done + IFS="$save_ifs" + fi + + # Install the pseudo-library for information purposes. + name=`$echo "X$file" | $Xsed -e 's%^.*/%%'` + instname="$dir/$name"i + $show "$install_prog $instname $destdir/$name" + $run eval "$install_prog $instname $destdir/$name" || exit $? + + # Maybe install the static library, too. + test -n "$old_library" && staticlibs="$staticlibs $dir/$old_library" + ;; + + *.lo) + # Install (i.e. copy) a libtool object. + + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile="$destdir/$destname" + else + destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'` + destfile="$destdir/$destfile" + fi + + # Deduce the name of the destination old-style object file. + case $destfile in + *.lo) + staticdest=`$echo "X$destfile" | $Xsed -e "$lo2o"` + ;; + *.$objext) + staticdest="$destfile" + destfile= + ;; + *) + $echo "$modename: cannot copy a libtool object to \`$destfile'" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + ;; + esac + + # Install the libtool object if requested. + if test -n "$destfile"; then + $show "$install_prog $file $destfile" + $run eval "$install_prog $file $destfile" || exit $? + fi + + # Install the old object if enabled. 
+ if test "$build_old_libs" = yes; then + # Deduce the name of the old-style object file. + staticobj=`$echo "X$file" | $Xsed -e "$lo2o"` + + $show "$install_prog $staticobj $staticdest" + $run eval "$install_prog \$staticobj \$staticdest" || exit $? + fi + exit $EXIT_SUCCESS + ;; + + *) + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile="$destdir/$destname" + else + destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'` + destfile="$destdir/$destfile" + fi + + # If the file is missing, and there is a .exe on the end, strip it + # because it is most likely a libtool script we actually want to + # install + stripped_ext="" + case $file in + *.exe) + if test ! -f "$file"; then + file=`$echo $file|${SED} 's,.exe$,,'` + stripped_ext=".exe" + fi + ;; + esac + + # Do a test to see if this is really a libtool program. + case $host in + *cygwin*|*mingw*) + wrapper=`$echo $file | ${SED} -e 's,.exe$,,'` + ;; + *) + wrapper=$file + ;; + esac + if (${SED} -e '4q' $wrapper | grep "^# Generated by .*$PACKAGE")>/dev/null 2>&1; then + notinst_deplibs= + relink_command= + + # Note that it is not necessary on cygwin/mingw to append a dot to + # foo even if both foo and FILE.exe exist: automatic-append-.exe + # behavior happens only for exec(3), not for open(2)! Also, sourcing + # `FILE.' does not work on cygwin managed mounts. + # + # If there is no directory component, then add one. + case $wrapper in + */* | *\\*) . ${wrapper} ;; + *) . ./${wrapper} ;; + esac + + # Check the variables that should have been set. + if test -z "$notinst_deplibs"; then + $echo "$modename: invalid libtool wrapper script \`$wrapper'" 1>&2 + exit $EXIT_FAILURE + fi + + finalize=yes + for lib in $notinst_deplibs; do + # Check to see that each library is installed. + libdir= + if test -f "$lib"; then + # If there is no directory component, then add one. + case $lib in + */* | *\\*) . $lib ;; + *) . ./$lib ;; + esac + fi + libfile="$libdir/"`$echo "X$lib" | $Xsed -e 's%^.*/%%g'` ### testsuite: skip nested quoting test + if test -n "$libdir" && test ! -f "$libfile"; then + $echo "$modename: warning: \`$lib' has not been installed in \`$libdir'" 1>&2 + finalize=no + fi + done + + relink_command= + # Note that it is not necessary on cygwin/mingw to append a dot to + # foo even if both foo and FILE.exe exist: automatic-append-.exe + # behavior happens only for exec(3), not for open(2)! Also, sourcing + # `FILE.' does not work on cygwin managed mounts. + # + # If there is no directory component, then add one. + case $wrapper in + */* | *\\*) . ${wrapper} ;; + *) . ./${wrapper} ;; + esac + + outputname= + if test "$fast_install" = no && test -n "$relink_command"; then + if test "$finalize" = yes && test -z "$run"; then + tmpdir=`func_mktempdir` + file=`$echo "X$file$stripped_ext" | $Xsed -e 's%^.*/%%'` + outputname="$tmpdir/$file" + # Replace the output file specification. + relink_command=`$echo "X$relink_command" | $Xsed -e 's%@OUTPUT@%'"$outputname"'%g'` + + $show "$relink_command" + if $run eval "$relink_command"; then : + else + $echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2 + ${rm}r "$tmpdir" + continue + fi + file="$outputname" + else + $echo "$modename: warning: cannot relink \`$file'" 1>&2 + fi + else + # Install the binary that we compiled earlier. 
+ file=`$echo "X$file$stripped_ext" | $Xsed -e "s%\([^/]*\)$%$objdir/\1%"` + fi + fi + + # remove .exe since cygwin /usr/bin/install will append another + # one anyway + case $install_prog,$host in + */usr/bin/install*,*cygwin*) + case $file:$destfile in + *.exe:*.exe) + # this is ok + ;; + *.exe:*) + destfile=$destfile.exe + ;; + *:*.exe) + destfile=`$echo $destfile | ${SED} -e 's,.exe$,,'` + ;; + esac + ;; + esac + $show "$install_prog$stripme $file $destfile" + $run eval "$install_prog\$stripme \$file \$destfile" || exit $? + test -n "$outputname" && ${rm}r "$tmpdir" + ;; + esac + done + + for file in $staticlibs; do + name=`$echo "X$file" | $Xsed -e 's%^.*/%%'` + + # Set up the ranlib parameters. + oldlib="$destdir/$name" + + $show "$install_prog $file $oldlib" + $run eval "$install_prog \$file \$oldlib" || exit $? + + if test -n "$stripme" && test -n "$old_striplib"; then + $show "$old_striplib $oldlib" + $run eval "$old_striplib $oldlib" || exit $? + fi + + # Do each command in the postinstall commands. + cmds=$old_postinstall_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || exit $? + done + IFS="$save_ifs" + done + + if test -n "$future_libdirs"; then + $echo "$modename: warning: remember to run \`$progname --finish$future_libdirs'" 1>&2 + fi + + if test -n "$current_libdirs"; then + # Maybe just do a dry run. + test -n "$run" && current_libdirs=" -n$current_libdirs" + exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs' + else + exit $EXIT_SUCCESS + fi + ;; + + # libtool finish mode + finish) + modename="$modename: finish" + libdirs="$nonopt" + admincmds= + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + for dir + do + libdirs="$libdirs $dir" + done + + for libdir in $libdirs; do + if test -n "$finish_cmds"; then + # Do each command in the finish commands. + cmds=$finish_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || admincmds="$admincmds + $cmd" + done + IFS="$save_ifs" + fi + if test -n "$finish_eval"; then + # Do the single finish_eval. + eval cmds=\"$finish_eval\" + $run eval "$cmds" || admincmds="$admincmds + $cmds" + fi + done + fi + + # Exit here if they wanted silent mode. 
+ test "$show" = : && exit $EXIT_SUCCESS + + $echo "X----------------------------------------------------------------------" | $Xsed + $echo "Libraries have been installed in:" + for libdir in $libdirs; do + $echo " $libdir" + done + $echo + $echo "If you ever happen to want to link against installed libraries" + $echo "in a given directory, LIBDIR, you must either use libtool, and" + $echo "specify the full pathname of the library, or use the \`-LLIBDIR'" + $echo "flag during linking and do at least one of the following:" + if test -n "$shlibpath_var"; then + $echo " - add LIBDIR to the \`$shlibpath_var' environment variable" + $echo " during execution" + fi + if test -n "$runpath_var"; then + $echo " - add LIBDIR to the \`$runpath_var' environment variable" + $echo " during linking" + fi + if test -n "$hardcode_libdir_flag_spec"; then + libdir=LIBDIR + eval flag=\"$hardcode_libdir_flag_spec\" + + $echo " - use the \`$flag' linker flag" + fi + if test -n "$admincmds"; then + $echo " - have your system administrator run these commands:$admincmds" + fi + if test -f /etc/ld.so.conf; then + $echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'" + fi + $echo + $echo "See any operating system documentation about shared libraries for" + $echo "more information, such as the ld(1) and ld.so(8) manual pages." + $echo "X----------------------------------------------------------------------" | $Xsed + exit $EXIT_SUCCESS + ;; + + # libtool execute mode + execute) + modename="$modename: execute" + + # The first argument is the command name. + cmd="$nonopt" + if test -z "$cmd"; then + $echo "$modename: you must specify a COMMAND" 1>&2 + $echo "$help" + exit $EXIT_FAILURE + fi + + # Handle -dlopen flags immediately. + for file in $execute_dlfiles; do + if test ! -f "$file"; then + $echo "$modename: \`$file' is not a file" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + dir= + case $file in + *.la) + # Check to see that this really is a libtool archive. + if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then : + else + $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # Read the libtool library. + dlname= + library_names= + + # If there is no directory component, then add one. + case $file in + */* | *\\*) . $file ;; + *) . ./$file ;; + esac + + # Skip this library if it cannot be dlopened. + if test -z "$dlname"; then + # Warn if it was a shared library. + test -n "$library_names" && $echo "$modename: warning: \`$file' was not linked with \`-export-dynamic'" + continue + fi + + dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'` + test "X$dir" = "X$file" && dir=. + + if test -f "$dir/$objdir/$dlname"; then + dir="$dir/$objdir" + else + $echo "$modename: cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" 1>&2 + exit $EXIT_FAILURE + fi + ;; + + *.lo) + # Just add the directory containing the .lo file. + dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'` + test "X$dir" = "X$file" && dir=. + ;; + + *) + $echo "$modename: warning \`-dlopen' is ignored for non-libtool libraries and objects" 1>&2 + continue + ;; + esac + + # Get the absolute pathname. + absdir=`cd "$dir" && pwd` + test -n "$absdir" && dir="$absdir" + + # Now add the directory to shlibpath_var. 
+ if eval "test -z \"\$$shlibpath_var\""; then + eval "$shlibpath_var=\"\$dir\"" + else + eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" + fi + done + + # This variable tells wrapper scripts just to set shlibpath_var + # rather than running their programs. + libtool_execute_magic="$magic" + + # Check if any of the arguments is a wrapper script. + args= + for file + do + case $file in + -*) ;; + *) + # Do a test to see if this is really a libtool program. + if (${SED} -e '4q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then + # If there is no directory component, then add one. + case $file in + */* | *\\*) . $file ;; + *) . ./$file ;; + esac + + # Transform arg to wrapped name. + file="$progdir/$program" + fi + ;; + esac + # Quote arguments (to preserve shell metacharacters). + file=`$echo "X$file" | $Xsed -e "$sed_quote_subst"` + args="$args \"$file\"" + done + + if test -z "$run"; then + if test -n "$shlibpath_var"; then + # Export the shlibpath_var. + eval "export $shlibpath_var" + fi + + # Restore saved environment variables + if test "${save_LC_ALL+set}" = set; then + LC_ALL="$save_LC_ALL"; export LC_ALL + fi + if test "${save_LANG+set}" = set; then + LANG="$save_LANG"; export LANG + fi + + # Now prepare to actually exec the command. + exec_cmd="\$cmd$args" + else + # Display what would be done. + if test -n "$shlibpath_var"; then + eval "\$echo \"\$shlibpath_var=\$$shlibpath_var\"" + $echo "export $shlibpath_var" + fi + $echo "$cmd$args" + exit $EXIT_SUCCESS + fi + ;; + + # libtool clean and uninstall mode + clean | uninstall) + modename="$modename: $mode" + rm="$nonopt" + files= + rmforce= + exit_status=0 + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic="$magic" + + for arg + do + case $arg in + -f) rm="$rm $arg"; rmforce=yes ;; + -*) rm="$rm $arg" ;; + *) files="$files $arg" ;; + esac + done + + if test -z "$rm"; then + $echo "$modename: you must specify an RM program" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + rmdirs= + + origobjdir="$objdir" + for file in $files; do + dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'` + if test "X$dir" = "X$file"; then + dir=. + objdir="$origobjdir" + else + objdir="$dir/$origobjdir" + fi + name=`$echo "X$file" | $Xsed -e 's%^.*/%%'` + test "$mode" = uninstall && objdir="$dir" + + # Remember objdir for removal later, being careful to avoid duplicates + if test "$mode" = clean; then + case " $rmdirs " in + *" $objdir "*) ;; + *) rmdirs="$rmdirs $objdir" ;; + esac + fi + + # Don't error if the file doesn't exist and rm -f was used. + if (test -L "$file") >/dev/null 2>&1 \ + || (test -h "$file") >/dev/null 2>&1 \ + || test -f "$file"; then + : + elif test -d "$file"; then + exit_status=1 + continue + elif test "$rmforce" = yes; then + continue + fi + + rmfiles="$file" + + case $name in + *.la) + # Possibly a libtool archive, so verify it. + if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then + . $dir/$name + + # Delete the libtool libraries and symlinks. 
+ for n in $library_names; do + rmfiles="$rmfiles $objdir/$n" + done + test -n "$old_library" && rmfiles="$rmfiles $objdir/$old_library" + + case "$mode" in + clean) + case " $library_names " in + # " " in the beginning catches empty $dlname + *" $dlname "*) ;; + *) rmfiles="$rmfiles $objdir/$dlname" ;; + esac + test -n "$libdir" && rmfiles="$rmfiles $objdir/$name $objdir/${name}i" + ;; + uninstall) + if test -n "$library_names"; then + # Do each command in the postuninstall commands. + cmds=$postuninstall_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" + if test "$?" -ne 0 && test "$rmforce" != yes; then + exit_status=1 + fi + done + IFS="$save_ifs" + fi + + if test -n "$old_library"; then + # Do each command in the old_postuninstall commands. + cmds=$old_postuninstall_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" + if test "$?" -ne 0 && test "$rmforce" != yes; then + exit_status=1 + fi + done + IFS="$save_ifs" + fi + # FIXME: should reinstall the best remaining shared library. + ;; + esac + fi + ;; + + *.lo) + # Possibly a libtool object, so verify it. + if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then + + # Read the .lo file + . $dir/$name + + # Add PIC object to the list of files to remove. + if test -n "$pic_object" \ + && test "$pic_object" != none; then + rmfiles="$rmfiles $dir/$pic_object" + fi + + # Add non-PIC object to the list of files to remove. + if test -n "$non_pic_object" \ + && test "$non_pic_object" != none; then + rmfiles="$rmfiles $dir/$non_pic_object" + fi + fi + ;; + + *) + if test "$mode" = clean ; then + noexename=$name + case $file in + *.exe) + file=`$echo $file|${SED} 's,.exe$,,'` + noexename=`$echo $name|${SED} 's,.exe$,,'` + # $file with .exe has already been added to rmfiles, + # add $file without .exe + rmfiles="$rmfiles $file" + ;; + esac + # Do a test to see if this is a libtool program. + if (${SED} -e '4q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then + relink_command= + . $dir/$noexename + + # note $name still contains .exe if it was in $file originally + # as does the version of $file that was added into $rmfiles + rmfiles="$rmfiles $objdir/$name $objdir/${name}S.${objext}" + if test "$fast_install" = yes && test -n "$relink_command"; then + rmfiles="$rmfiles $objdir/lt-$name" + fi + if test "X$noexename" != "X$name" ; then + rmfiles="$rmfiles $objdir/lt-${noexename}.c" + fi + fi + fi + ;; + esac + $show "$rm $rmfiles" + $run $rm $rmfiles || exit_status=1 + done + objdir="$origobjdir" + + # Try to remove the ${objdir}s in the directories where we deleted files + for dir in $rmdirs; do + if test -d "$dir"; then + $show "rmdir $dir" + $run rmdir $dir >/dev/null 2>&1 + fi + done + + exit $exit_status + ;; + + "") + $echo "$modename: you must specify a MODE" 1>&2 + $echo "$generic_help" 1>&2 + exit $EXIT_FAILURE + ;; + esac + + if test -z "$exec_cmd"; then + $echo "$modename: invalid operation mode \`$mode'" 1>&2 + $echo "$generic_help" 1>&2 + exit $EXIT_FAILURE + fi +fi # test -z "$show_help" + +if test -n "$exec_cmd"; then + eval exec $exec_cmd + exit $EXIT_FAILURE +fi + +# We need to display help for each of the modes. +case $mode in +"") $echo \ +"Usage: $modename [OPTION]... [MODE-ARG]... + +Provide generalized library-building support services. 
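+
+A typical sequence, with illustrative file names, is:
+
+  $modename --mode=compile gcc -c foo.c
+  $modename --mode=link gcc -o libfoo.la foo.lo -rpath /usr/local/lib
+  $modename --mode=install /usr/bin/install -c libfoo.la /usr/local/lib/libfoo.la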
+ + --config show all configuration variables + --debug enable verbose shell tracing +-n, --dry-run display commands without modifying any files + --features display basic configuration information and exit + --finish same as \`--mode=finish' + --help display this help message and exit + --mode=MODE use operation mode MODE [default=inferred from MODE-ARGS] + --quiet same as \`--silent' + --silent don't print informational messages + --tag=TAG use configuration variables from tag TAG + --version print version information + +MODE must be one of the following: + + clean remove files from the build directory + compile compile a source file into a libtool object + execute automatically set library path, then run a program + finish complete the installation of libtool libraries + install install libraries or executables + link create a library or an executable + uninstall remove libraries from an installed directory + +MODE-ARGS vary depending on the MODE. Try \`$modename --help --mode=MODE' for +a more detailed description of MODE. + +Report bugs to ." + exit $EXIT_SUCCESS + ;; + +clean) + $echo \ +"Usage: $modename [OPTION]... --mode=clean RM [RM-OPTION]... FILE... + +Remove files from the build directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed +to RM. + +If FILE is a libtool library, object or program, all the files associated +with it are deleted. Otherwise, only FILE itself is deleted using RM." + ;; + +compile) + $echo \ +"Usage: $modename [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE + +Compile a source file into a libtool library object. + +This mode accepts the following additional options: + + -o OUTPUT-FILE set the output file name to OUTPUT-FILE + -prefer-pic try to building PIC objects only + -prefer-non-pic try to building non-PIC objects only + -static always build a \`.o' file suitable for static linking + +COMPILE-COMMAND is a command to be used in creating a \`standard' object file +from the given SOURCEFILE. + +The output file name is determined by removing the directory component from +SOURCEFILE, then substituting the C source code suffix \`.c' with the +library object suffix, \`.lo'." + ;; + +execute) + $echo \ +"Usage: $modename [OPTION]... --mode=execute COMMAND [ARGS]... + +Automatically set library path, then run a program. + +This mode accepts the following additional options: + + -dlopen FILE add the directory containing FILE to the library path + +This mode sets the library path environment variable according to \`-dlopen' +flags. + +If any of the ARGS are libtool executable wrappers, then they are translated +into their corresponding uninstalled binary, and any of their required library +directories are added to the library path. + +Then, COMMAND is executed, with ARGS as arguments." + ;; + +finish) + $echo \ +"Usage: $modename [OPTION]... --mode=finish [LIBDIR]... + +Complete the installation of libtool libraries. + +Each LIBDIR is a directory that contains libtool libraries. + +The commands that this mode executes may require superuser privileges. Use +the \`--dry-run' option if you just want to see what would be executed." + ;; + +install) + $echo \ +"Usage: $modename [OPTION]... --mode=install INSTALL-COMMAND... + +Install executables or libraries. + +INSTALL-COMMAND is the installation command. The first component should be +either the \`install' or \`cp' program. 
+ +The rest of the components are interpreted as arguments to that command (only +BSD-compatible install options are recognized)." + ;; + +link) + $echo \ +"Usage: $modename [OPTION]... --mode=link LINK-COMMAND... + +Link object files or libraries together to form another library, or to +create an executable program. + +LINK-COMMAND is a command using the C compiler that you would use to create +a program from several object files. + +The following components of LINK-COMMAND are treated specially: + + -all-static do not do any dynamic linking at all + -avoid-version do not add a version suffix if possible + -dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime + -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols + -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3) + -export-symbols SYMFILE + try to export only the symbols listed in SYMFILE + -export-symbols-regex REGEX + try to export only the symbols matching REGEX + -LLIBDIR search LIBDIR for required installed libraries + -lNAME OUTPUT-FILE requires the installed library libNAME + -module build a library that can dlopened + -no-fast-install disable the fast-install mode + -no-install link a not-installable executable + -no-undefined declare that a library does not refer to external symbols + -o OUTPUT-FILE create OUTPUT-FILE from the specified objects + -objectlist FILE Use a list of object files found in FILE to specify objects + -precious-files-regex REGEX + don't remove output files matching REGEX + -release RELEASE specify package release information + -rpath LIBDIR the created library will eventually be installed in LIBDIR + -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries + -static do not do any dynamic linking of libtool libraries + -version-info CURRENT[:REVISION[:AGE]] + specify library version info [each variable defaults to 0] + +All other options (arguments beginning with \`-') are ignored. + +Every other argument is treated as a filename. Files ending in \`.la' are +treated as uninstalled libtool libraries, other files are standard or library +object files. + +If the OUTPUT-FILE ends in \`.la', then a libtool library is created, +only library objects (\`.lo' files) may be specified, and \`-rpath' is +required, except when creating a convenience library. + +If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created +using \`ar' and \`ranlib', or on Windows using \`lib'. + +If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file +is created, otherwise an executable program is created." + ;; + +uninstall) + $echo \ +"Usage: $modename [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE... + +Remove libraries from an installation directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed +to RM. + +If FILE is a libtool library, all the files associated with it are deleted. +Otherwise, only FILE itself is deleted using RM." + ;; + +*) + $echo "$modename: invalid operation mode \`$mode'" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + ;; +esac + +$echo +$echo "Try \`$modename --help' for more information about other modes." + +exit $? + +# The TAGs below are defined such that we never get into a situation +# in which we disable both kinds of libraries. 
Given conflicting +# choices, we go for a static library, that is the most portable, +# since we can't tell whether shared libraries were disabled because +# the user asked for that or because the platform doesn't support +# them. This is particularly important on AIX, because we don't +# support having both static and shared libraries enabled at the same +# time on that platform, so we default to a shared-only configuration. +# If a disable-shared tag is given, we'll fallback to a static-only +# configuration. But we'll never go from static-only to shared-only. + +# ### BEGIN LIBTOOL TAG CONFIG: disable-shared +disable_libs=shared +# ### END LIBTOOL TAG CONFIG: disable-shared + +# ### BEGIN LIBTOOL TAG CONFIG: disable-static +disable_libs=static +# ### END LIBTOOL TAG CONFIG: disable-static + +# Local Variables: +# mode:shell-script +# sh-indentation:2 +# End: diff --git a/libraries/sqlite/unix/sqlite-3.5.1/main.mk b/libraries/sqlite/unix/sqlite-3.5.1/main.mk new file mode 100644 index 0000000000..c700c5bc73 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/main.mk @@ -0,0 +1,552 @@ +############################################################################### +# The following macros should be defined before this script is +# invoked: +# +# TOP The toplevel directory of the source tree. This is the +# directory that contains this "Makefile.in" and the +# "configure.in" script. +# +# BCC C Compiler and options for use in building executables that +# will run on the platform that is doing the build. +# +# THREADLIB Specify any extra linker options needed to make the library +# thread safe +# +# OPTS Extra compiler command-line options. +# +# EXE The suffix to add to executable files. ".exe" for windows +# and "" for Unix. +# +# TCC C Compiler and options for use in building executables that +# will run on the target platform. This is usually the same +# as BCC, unless you are cross-compiling. +# +# AR Tools used to build a static library. +# RANLIB +# +# TCL_FLAGS Extra compiler options needed for programs that use the +# TCL library. +# +# LIBTCL Linker options needed to link against the TCL library. +# +# READLINE_FLAGS Compiler options needed for programs that use the +# readline() library. +# +# LIBREADLINE Linker options needed by programs using readline() must +# link against. +# +# NAWK Nawk compatible awk program. Older (obsolete?) solaris +# systems need this to avoid using the original AT&T AWK. +# +# Once the macros above are defined, the rest of this make script will +# build the SQLite library and testing tools. +################################################################################ + +# This is how we compile +# +TCCX = $(TCC) $(OPTS) -I. -I$(TOP)/src + +# Object files for the SQLite library. 
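+# (Each object listed below is produced from the matching source file by
+# the pattern rules later in this file; for example, alter.o is built by a
+# command of roughly this form, with the exact flags coming from the TCC
+# and OPTS settings described above:
+#
+#     $(TCCX) -c $(TOP)/src/alter.c
+# )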
+# +LIBOBJ+= alter.o analyze.o attach.o auth.o btmutex.o btree.o build.o \ + callback.o complete.o date.o delete.o \ + expr.o func.o hash.o insert.o journal.o loadext.o \ + main.o malloc.o mem1.o mem2.o mutex.o mutex_os2.o \ + mutex_unix.o mutex_w32.o \ + opcodes.o os.o os_os2.o os_unix.o os_win.o \ + pager.o parse.o pragma.o prepare.o printf.o random.o \ + select.o table.o tclsqlite.o tokenize.o trigger.o \ + update.o util.o vacuum.o \ + vdbe.o vdbeapi.o vdbeaux.o vdbeblob.o vdbefifo.o vdbemem.o \ + where.o utf.o legacy.o vtab.o + +EXTOBJ = icu.o +EXTOBJ += fts1.o \ + fts1_hash.o \ + fts1_tokenizer1.o \ + fts1_porter.o +EXTOBJ += fts2.o \ + fts2_hash.o \ + fts2_icu.o \ + fts2_porter.o \ + fts2_tokenizer.o \ + fts2_tokenizer1.o +EXTOBJ += fts3.o \ + fts3_hash.o \ + fts3_icu.o \ + fts3_porter.o \ + fts3_tokenizer.o \ + fts3_tokenizer1.o + +# All of the source code files. +# +SRC = \ + $(TOP)/src/alter.c \ + $(TOP)/src/analyze.c \ + $(TOP)/src/attach.c \ + $(TOP)/src/auth.c \ + $(TOP)/src/btmutex.c \ + $(TOP)/src/btree.c \ + $(TOP)/src/btree.h \ + $(TOP)/src/btreeInt.h \ + $(TOP)/src/build.c \ + $(TOP)/src/callback.c \ + $(TOP)/src/complete.c \ + $(TOP)/src/date.c \ + $(TOP)/src/delete.c \ + $(TOP)/src/expr.c \ + $(TOP)/src/func.c \ + $(TOP)/src/hash.c \ + $(TOP)/src/hash.h \ + $(TOP)/src/insert.c \ + $(TOP)/src/journal.c \ + $(TOP)/src/legacy.c \ + $(TOP)/src/loadext.c \ + $(TOP)/src/main.c \ + $(TOP)/src/malloc.c \ + $(TOP)/src/mem1.c \ + $(TOP)/src/mem2.c \ + $(TOP)/src/mutex.c \ + $(TOP)/src/mutex.h \ + $(TOP)/src/mutex_os2.c \ + $(TOP)/src/mutex_unix.c \ + $(TOP)/src/mutex_w32.c \ + $(TOP)/src/os.c \ + $(TOP)/src/os.h \ + $(TOP)/src/os_common.h \ + $(TOP)/src/os_os2.c \ + $(TOP)/src/os_unix.c \ + $(TOP)/src/os_win.c \ + $(TOP)/src/pager.c \ + $(TOP)/src/pager.h \ + $(TOP)/src/parse.y \ + $(TOP)/src/pragma.c \ + $(TOP)/src/prepare.c \ + $(TOP)/src/printf.c \ + $(TOP)/src/random.c \ + $(TOP)/src/select.c \ + $(TOP)/src/shell.c \ + $(TOP)/src/sqlite.h.in \ + $(TOP)/src/sqlite3ext.h \ + $(TOP)/src/sqliteInt.h \ + $(TOP)/src/sqliteLimit.h \ + $(TOP)/src/table.c \ + $(TOP)/src/tclsqlite.c \ + $(TOP)/src/tokenize.c \ + $(TOP)/src/trigger.c \ + $(TOP)/src/utf.c \ + $(TOP)/src/update.c \ + $(TOP)/src/util.c \ + $(TOP)/src/vacuum.c \ + $(TOP)/src/vdbe.c \ + $(TOP)/src/vdbe.h \ + $(TOP)/src/vdbeapi.c \ + $(TOP)/src/vdbeaux.c \ + $(TOP)/src/vdbeblob.c \ + $(TOP)/src/vdbefifo.c \ + $(TOP)/src/vdbemem.c \ + $(TOP)/src/vdbeInt.h \ + $(TOP)/src/vtab.c \ + $(TOP)/src/where.c + +# Source code for extensions +# +SRC += \ + $(TOP)/ext/fts1/fts1.c \ + $(TOP)/ext/fts1/fts1.h \ + $(TOP)/ext/fts1/fts1_hash.c \ + $(TOP)/ext/fts1/fts1_hash.h \ + $(TOP)/ext/fts1/fts1_porter.c \ + $(TOP)/ext/fts1/fts1_tokenizer.h \ + $(TOP)/ext/fts1/fts1_tokenizer1.c +SRC += \ + $(TOP)/ext/fts2/fts2.c \ + $(TOP)/ext/fts2/fts2.h \ + $(TOP)/ext/fts2/fts2_hash.c \ + $(TOP)/ext/fts2/fts2_hash.h \ + $(TOP)/ext/fts2/fts2_icu.c \ + $(TOP)/ext/fts2/fts2_porter.c \ + $(TOP)/ext/fts2/fts2_tokenizer.h \ + $(TOP)/ext/fts2/fts2_tokenizer.c \ + $(TOP)/ext/fts2/fts2_tokenizer1.c +SRC += \ + $(TOP)/ext/fts3/fts3.c \ + $(TOP)/ext/fts3/fts3.h \ + $(TOP)/ext/fts3/fts3_hash.c \ + $(TOP)/ext/fts3/fts3_hash.h \ + $(TOP)/ext/fts3/fts3_icu.c \ + $(TOP)/ext/fts3/fts3_porter.c \ + $(TOP)/ext/fts3/fts3_tokenizer.h \ + $(TOP)/ext/fts3/fts3_tokenizer.c \ + $(TOP)/ext/fts3/fts3_tokenizer1.c +SRC += \ + $(TOP)/ext/icu/icu.c + + +# Generated source code files +# +SRC += \ + keywordhash.h \ + opcodes.c \ + opcodes.h \ + parse.c \ + parse.h \ + sqlite3.h + + +# 
Source code to the test files. +# +TESTSRC = \ + $(TOP)/src/test1.c \ + $(TOP)/src/test2.c \ + $(TOP)/src/test3.c \ + $(TOP)/src/test4.c \ + $(TOP)/src/test5.c \ + $(TOP)/src/test6.c \ + $(TOP)/src/test7.c \ + $(TOP)/src/test8.c \ + $(TOP)/src/test9.c \ + $(TOP)/src/test_autoext.c \ + $(TOP)/src/test_async.c \ + $(TOP)/src/test_btree.c \ + $(TOP)/src/test_config.c \ + $(TOP)/src/test_hexio.c \ + $(TOP)/src/test_malloc.c \ + $(TOP)/src/test_md5.c \ + $(TOP)/src/test_onefile.c \ + $(TOP)/src/test_schema.c \ + $(TOP)/src/test_server.c \ + $(TOP)/src/test_tclvar.c \ + $(TOP)/src/test_thread.c \ + +TESTSRC += $(TOP)/ext/fts2/fts2_tokenizer.c +TESTSRC += $(TOP)/ext/fts3/fts3_tokenizer.c + +TESTSRC2 = \ + $(TOP)/src/attach.c $(TOP)/src/btree.c $(TOP)/src/build.c $(TOP)/src/date.c \ + $(TOP)/src/expr.c $(TOP)/src/func.c $(TOP)/src/insert.c $(TOP)/src/os.c \ + $(TOP)/src/os_os2.c $(TOP)/src/os_unix.c $(TOP)/src/os_win.c \ + $(TOP)/src/pager.c $(TOP)/src/pragma.c $(TOP)/src/prepare.c \ + $(TOP)/src/printf.c $(TOP)/src/select.c $(TOP)/src/tokenize.c \ + $(TOP)/src/utf.c $(TOP)/src/util.c $(TOP)/src/vdbeapi.c $(TOP)/src/vdbeaux.c \ + $(TOP)/src/vdbe.c $(TOP)/src/vdbemem.c $(TOP)/src/where.c parse.c + +# Header files used by all library source files. +# +HDR = \ + $(TOP)/src/btree.h \ + $(TOP)/src/btreeInt.h \ + $(TOP)/src/hash.h \ + keywordhash.h \ + $(TOP)/src/mutex.h \ + opcodes.h \ + $(TOP)/src/os.h \ + $(TOP)/src/os_common.h \ + $(TOP)/src/pager.h \ + parse.h \ + sqlite3.h \ + $(TOP)/src/sqlite3ext.h \ + $(TOP)/src/sqliteInt.h \ + $(TOP)/src/sqliteLimit.h \ + $(TOP)/src/vdbe.h \ + $(TOP)/src/vdbeInt.h + +# Header files used by extensions +# +EXTHDR += \ + $(TOP)/ext/fts1/fts1.h \ + $(TOP)/ext/fts1/fts1_hash.h \ + $(TOP)/ext/fts1/fts1_tokenizer.h +EXTHDR += \ + $(TOP)/ext/fts2/fts2.h \ + $(TOP)/ext/fts2/fts2_hash.h \ + $(TOP)/ext/fts2/fts2_tokenizer.h +EXTHDR += \ + $(TOP)/ext/fts3/fts3.h \ + $(TOP)/ext/fts3/fts3_hash.h \ + $(TOP)/ext/fts3/fts3_tokenizer.h + +# This is the default Makefile target. The objects listed here +# are what get build when you type just "make" with no arguments. +# +all: sqlite3.h libsqlite3.a sqlite3$(EXE) + +# Generate the file "last_change" which contains the date of change +# of the most recently modified source code file +# +last_change: $(SRC) + cat $(SRC) | grep '$$Id: ' | sort -k 5 | tail -1 \ + | $(NAWK) '{print $$5,$$6}' >last_change + +libsqlite3.a: $(LIBOBJ) + $(AR) libsqlite3.a $(LIBOBJ) + $(RANLIB) libsqlite3.a + +sqlite3$(EXE): $(TOP)/src/shell.c libsqlite3.a sqlite3.h + $(TCCX) $(READLINE_FLAGS) -o sqlite3$(EXE) \ + $(TOP)/src/shell.c \ + libsqlite3.a $(LIBREADLINE) $(TLIBS) $(THREADLIB) + +objects: $(LIBOBJ_ORIG) + +# This target creates a directory named "tsrc" and fills it with +# copies of all of the C source code and header files needed to +# build on the target system. Some of the C source code and header +# files are automatically generated. This target takes care of +# all that automatic generation. 
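+# For example, from a configured build directory that includes this file,
+# a sequence like the following (illustrative; the amalgamation step needs
+# tclsh on the PATH) populates tsrc/ and then bundles it into the
+# single-file "amalgamation" sources:
+#
+#     make target_source
+#     make sqlite3.c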
+# +target_source: $(SRC) + rm -rf tsrc + mkdir tsrc + cp -f $(SRC) tsrc + rm tsrc/sqlite.h.in tsrc/parse.y + +sqlite3.c: target_source $(TOP)/tool/mksqlite3c.tcl + tclsh $(TOP)/tool/mksqlite3c.tcl + cp sqlite3.c tclsqlite3.c + cat $(TOP)/src/tclsqlite.c >>tclsqlite3.c + tclsh $(TOP)/tool/mksqlite3internalh.tcl + +fts2amal.c: target_source $(TOP)/ext/fts2/mkfts2amal.tcl + tclsh $(TOP)/ext/fts2/mkfts2amal.tcl + +fts3amal.c: target_source $(TOP)/ext/fts3/mkfts3amal.tcl + tclsh $(TOP)/ext/fts3/mkfts3amal.tcl + +# Rules to build the LEMON compiler generator +# +lemon: $(TOP)/tool/lemon.c $(TOP)/tool/lempar.c + $(BCC) -o lemon $(TOP)/tool/lemon.c + cp $(TOP)/tool/lempar.c . + +# Rules to build individual *.o files from files in the src directory. +# +%.o: %.c $(HDR) + $(TCCX) -c $< + +# Rules to build individual *.o files from generated *.c files. This +# applies to: +# +# parse.o +# opcodes.o +# +%.o: $(TOP)/src/%.c $(HDR) + $(TCCX) -c $< + +tclsqlite.o: $(TOP)/src/tclsqlite.c $(HDR) + $(TCCX) $(TCL_FLAGS) -c $(TOP)/src/tclsqlite.c + + + +# Rules to build opcodes.c and opcodes.h +# +opcodes.c: opcodes.h $(TOP)/mkopcodec.awk + sort -n -b -k 3 opcodes.h | $(NAWK) -f $(TOP)/mkopcodec.awk >opcodes.c + +opcodes.h: parse.h $(TOP)/src/vdbe.c $(TOP)/mkopcodeh.awk + cat parse.h $(TOP)/src/vdbe.c |$(NAWK) -f $(TOP)/mkopcodeh.awk >opcodes.h + + +# Rules to build parse.c and parse.h - the outputs of lemon. +# +parse.h: parse.c + +parse.c: $(TOP)/src/parse.y lemon $(TOP)/addopcodes.awk + cp $(TOP)/src/parse.y . + ./lemon $(OPTS) parse.y + mv parse.h parse.h.temp + awk -f $(TOP)/addopcodes.awk parse.h.temp >parse.h + +sqlite3.h: $(TOP)/src/sqlite.h.in + sed -e s/--VERS--/`cat ${TOP}/VERSION`/ \ + -e s/--VERSION-NUMBER--/`cat ${TOP}/VERSION | sed 's/[^0-9]/ /g' | $(NAWK) '{printf "%d%03d%03d",$$1,$$2,$$3}'`/ \ + $(TOP)/src/sqlite.h.in >sqlite3.h + +keywordhash.h: $(TOP)/tool/mkkeywordhash.c + $(BCC) -o mkkeywordhash $(OPTS) $(TOP)/tool/mkkeywordhash.c + ./mkkeywordhash >keywordhash.h + + + +# Rules to build the extension objects. 
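+# (These objects are compiled with -DSQLITE_CORE so that the FTS and ICU
+# code is built directly into the core library rather than as run-time
+# loadable modules; a representative command, following the rules below,
+# is:
+#
+#     $(TCCX) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3.c
+# )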
+# +icu.o: $(TOP)/ext/icu/icu.c $(HDR) $(EXTHDR) + $(TCCX) -DSQLITE_CORE -c $(TOP)/ext/icu/icu.c + +fts2.o: $(TOP)/ext/fts2/fts2.c $(HDR) $(EXTHDR) + $(TCCX) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2.c + +fts2_hash.o: $(TOP)/ext/fts2/fts2_hash.c $(HDR) $(EXTHDR) + $(TCCX) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2_hash.c + +fts2_icu.o: $(TOP)/ext/fts2/fts2_icu.c $(HDR) $(EXTHDR) + $(TCCX) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2_icu.c + +fts2_porter.o: $(TOP)/ext/fts2/fts2_porter.c $(HDR) $(EXTHDR) + $(TCCX) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2_porter.c + +fts2_tokenizer.o: $(TOP)/ext/fts2/fts2_tokenizer.c $(HDR) $(EXTHDR) + $(TCCX) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2_tokenizer.c + +fts2_tokenizer1.o: $(TOP)/ext/fts2/fts2_tokenizer1.c $(HDR) $(EXTHDR) + $(TCCX) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2_tokenizer1.c + +fts3.o: $(TOP)/ext/fts3/fts3.c $(HDR) $(EXTHDR) + $(TCCX) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3.c + +fts3_hash.o: $(TOP)/ext/fts3/fts3_hash.c $(HDR) $(EXTHDR) + $(TCCX) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_hash.c + +fts3_icu.o: $(TOP)/ext/fts3/fts3_icu.c $(HDR) $(EXTHDR) + $(TCCX) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_icu.c + +fts3_porter.o: $(TOP)/ext/fts3/fts3_porter.c $(HDR) $(EXTHDR) + $(TCCX) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_porter.c + +fts3_tokenizer.o: $(TOP)/ext/fts3/fts3_tokenizer.c $(HDR) $(EXTHDR) + $(TCCX) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_tokenizer.c + +fts3_tokenizer1.o: $(TOP)/ext/fts3/fts3_tokenizer1.c $(HDR) $(EXTHDR) + $(TCCX) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_tokenizer1.c + + +# Rules for building test programs and for running tests +# +tclsqlite3: $(TOP)/src/tclsqlite.c libsqlite3.a + $(TCCX) $(TCL_FLAGS) -DTCLSH=1 -o tclsqlite3 \ + $(TOP)/src/tclsqlite.c libsqlite3.a $(LIBTCL) $(THREADLIB) + + +# Rules to build the 'testfixture' application. 
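+# (testfixture is the Tcl-enabled test harness used by the "test",
+# "fulltest" and "soaktest" targets further down; once built it can also
+# be run by hand against a single script, for example:
+#
+#     ./testfixture $(TOP)/test/quick.test
+# )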
+# +TESTFIXTURE_FLAGS = -DTCLSH=1 -DSQLITE_TEST=1 -DSQLITE_CRASH_TEST=1 +TESTFIXTURE_FLAGS += -DSQLITE_SERVER=1 -DSQLITE_PRIVATE="" -DSQLITE_CORE + +testfixture$(EXE): $(TESTSRC2) libsqlite3.a $(TESTSRC) $(TOP)/src/tclsqlite.c + $(TCCX) $(TCL_FLAGS) $(TESTFIXTURE_FLAGS) \ + $(TESTSRC) $(TESTSRC2) $(TOP)/src/tclsqlite.c \ + -o testfixture$(EXE) $(LIBTCL) $(THREADLIB) libsqlite3.a + +amalgamation-testfixture$(EXE): sqlite3.c $(TESTSRC) $(TOP)/src/tclsqlite.c + $(TCCX) $(TCL_FLAGS) $(TESTFIXTURE_FLAGS) \ + $(TESTSRC) $(TOP)/src/tclsqlite.c sqlite3.c \ + -o testfixture$(EXE) $(LIBTCL) $(THREADLIB) + +fulltest: testfixture$(EXE) sqlite3$(EXE) + ./testfixture$(EXE) $(TOP)/test/all.test + +soaktest: testfixture$(EXE) sqlite3$(EXE) + ./testfixture$(EXE) $(TOP)/test/all.test -soak 1 + +test: testfixture$(EXE) sqlite3$(EXE) + ./testfixture$(EXE) $(TOP)/test/quick.test + +sqlite3_analyzer$(EXE): $(TOP)/src/tclsqlite.c sqlite3.c $(TESTSRC) \ + $(TOP)/tool/spaceanal.tcl + sed \ + -e '/^#/d' \ + -e 's,\\,\\\\,g' \ + -e 's,",\\",g' \ + -e 's,^,",' \ + -e 's,$$,\\n",' \ + $(TOP)/tool/spaceanal.tcl >spaceanal_tcl.h + $(TCCX) $(TCL_FLAGS) \ + -DTCLSH=2 -DSQLITE_TEST=1 -DSQLITE_DEBUG=1 -DSQLITE_PRIVATE="" \ + $(TESTSRC) $(TOP)/src/tclsqlite.c sqlite3.c \ + -o sqlite3_analyzer$(EXE) \ + $(LIBTCL) $(THREADLIB) + +TEST_EXTENSION = $(SHPREFIX)testloadext.$(SO) +$(TEST_EXTENSION): $(TOP)/src/test_loadext.c + $(MKSHLIB) $(TOP)/src/test_loadext.c -o $(TEST_EXTENSION) + +extensiontest: testfixture$(EXE) $(TEST_EXTENSION) + ./testfixture$(EXE) $(TOP)/test/loadext.test + + +# Rules used to build documentation +# +%.html: $(TOP)/www/%.tcl docdir last_change common.tcl + tclsh $< > $@ + +lang.html: $(TOP)/www/lang.tcl docdir + tclsh $(TOP)/www/lang.tcl doc >lang.html + +opcode.html: $(TOP)/www/opcode.tcl $(TOP)/src/vdbe.c + tclsh $(TOP)/www/opcode.tcl $(TOP)/src/vdbe.c >opcode.html + +capi3ref.html: $(TOP)/www/mkapidoc.tcl sqlite3.h + tclsh $(TOP)/www/mkapidoc.tcl capi3ref.html + +copyright-release.html: $(TOP)/www/copyright-release.html + cp $(TOP)/www/copyright-release.html . + +%: $(TOP)/www/% + cp $< $@ + +# Files to be published on the website. 
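+# (Most of the .html files listed below are generated from Tcl scripts of
+# the same name in $(TOP)/www by the rules above; building and collecting
+# them, together with the images, into the doc/ subdirectory is done with:
+#
+#     make doc
+# )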
+# +DOC = \ + arch.html \ + autoinc.html \ + c_interface.html \ + capi3.html \ + capi3ref.html \ + changes.html \ + compile.html \ + copyright.html \ + copyright-release.html \ + copyright-release.pdf \ + conflict.html \ + datatypes.html \ + datatype3.html \ + different.html \ + docs.html \ + download.html \ + faq.html \ + fileformat.html \ + formatchng.html \ + index.html \ + limits.html \ + lang.html \ + lockingv3.html \ + mingw.html \ + nulls.html \ + oldnews.html \ + omitted.html \ + opcode.html \ + optimizer.html \ + optoverview.html \ + pragma.html \ + quickstart.html \ + sharedcache.html \ + speed.html \ + sqlite.html \ + support.html \ + tclsqlite.html \ + vdbe.html \ + version3.html \ + whentouse.html \ + 34to35.html + +docdir: + mkdir -p doc + +doc: common.tcl $(DOC) docdir + mv $(DOC) doc + cp $(TOP)/www/*.gif $(TOP)/art/*.gif doc + +# Standard install and cleanup targets +# +install: sqlite3 libsqlite3.a sqlite3.h + mv sqlite3 /usr/bin + mv libsqlite3.a /usr/lib + mv sqlite3.h /usr/include + +clean: + rm -f *.o sqlite3 libsqlite3.a sqlite3.h opcodes.* + rm -f lemon lempar.c parse.* sqlite*.tar.gz mkkeywordhash keywordhash.h + rm -f $(PUBLISH) + rm -f *.da *.bb *.bbg gmon.out + rm -rf tsrc + rm -f testloadext.dll libtestloadext.so diff --git a/libraries/sqlite/unix/sqlite-3.5.1/mkdll.sh b/libraries/sqlite/unix/sqlite-3.5.1/mkdll.sh new file mode 100644 index 0000000000..e1e6d98818 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/mkdll.sh @@ -0,0 +1,45 @@ +#!/bin/sh +# +# This script is used to compile SQLite into a DLL. +# +# Two separate DLLs are generated. "sqlite3.dll" is the core +# library. "tclsqlite3.dll" contains the TCL bindings and is the +# library that is loaded into TCL in order to run SQLite. +# +make sqlite3.c +PATH=$PATH:/opt/mingw/bin +TCLDIR=/home/drh/tcltk/846/win/846win +TCLSTUBLIB=$TCLDIR/libtcl84stub.a +OPTS='-DUSE_TCL_STUBS=1 -DTHREADSAFE=1 -DBUILD_sqlite=1 -DOS_WIN=1' +CC="i386-mingw32msvc-gcc -O2 $OPTS -Itsrc -I$TCLDIR" +NM="i386-mingw32msvc-nm" +CMD="$CC -c sqlite3.c" +echo $CMD +$CMD +CMD="$CC -c tclsqlite3.c" +echo $CMD +$CMD +echo 'EXPORTS' >tclsqlite3.def +$NM tclsqlite3.o | grep ' T ' >temp1 +grep '_Init$' temp1 >temp2 +grep '_SafeInit$' temp1 >>temp2 +grep ' T _sqlite3_' temp1 >>temp2 +echo 'EXPORTS' >tclsqlite3.def +sed 's/^.* T _//' temp2 | sort | uniq >>tclsqlite3.def +i386-mingw32msvc-dllwrap \ + --def tclsqlite3.def -v --export-all \ + --driver-name i386-mingw32msvc-gcc \ + --dlltool-name i386-mingw32msvc-dlltool \ + --as i386-mingw32msvc-as \ + --target i386-mingw32 \ + -dllname tclsqlite3.dll -lmsvcrt tclsqlite3.o $TCLSTUBLIB +$NM sqlite3.o | grep ' T ' >temp1 +echo 'EXPORTS' >sqlite3.def +grep ' _sqlite3_' temp1 | sed 's/^.* _//' >>sqlite3.def +i386-mingw32msvc-dllwrap \ + --def sqlite3.def -v --export-all \ + --driver-name i386-mingw32msvc-gcc \ + --dlltool-name i386-mingw32msvc-dlltool \ + --as i386-mingw32msvc-as \ + --target i386-mingw32 \ + -dllname sqlite3.dll -lmsvcrt sqlite3.o diff --git a/libraries/sqlite/unix/sqlite-3.5.1/mkextu.sh b/libraries/sqlite/unix/sqlite-3.5.1/mkextu.sh new file mode 100644 index 0000000000..1d96897769 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/mkextu.sh @@ -0,0 +1,13 @@ +#!/bin/sh +# +# This script is used to compile SQLite into a shared library on Linux. +# +# Two separate shared libraries are generated. "sqlite3.so" is the core +# library. "tclsqlite3.so" contains the TCL bindings and is the +# library that is loaded into TCL in order to run SQLite. 
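+#
+# (As written, the commands below build only the fts2 full-text search
+# extension, fts2.so, from the fts2 amalgamation.  Once built, and assuming
+# the SQLite library in use was compiled with extension loading enabled,
+# the module can be loaded from the sqlite3 shell, for example:
+#
+#   sqlite> SELECT load_extension('./fts2.so');
+# )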
+# +CFLAGS=-O2 -Wall +make fts2amal.c +echo gcc $CFLAGS -shared fts2amal.c -o fts2.so +gcc $CFLAGS -shared fts2amal.c -o fts2.so +strip fts2.so diff --git a/libraries/sqlite/unix/sqlite-3.5.1/mkextw.sh b/libraries/sqlite/unix/sqlite-3.5.1/mkextw.sh new file mode 100644 index 0000000000..3332f912f3 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/mkextw.sh @@ -0,0 +1,22 @@ +#!/bin/sh +# +# This script is used to compile SQLite extensions into DLLs. +# +make fts2amal.c +PATH=$PATH:/opt/mingw/bin +OPTS='-DTHREADSAFE=1 -DBUILD_sqlite=1 -DOS_WIN=1' +CC="i386-mingw32msvc-gcc -O2 $OPTS -Itsrc" +NM="i386-mingw32msvc-nm" +CMD="$CC -c fts2amal.c" +echo $CMD +$CMD +echo 'EXPORTS' >fts2.def +echo 'sqlite3_extension_init' >>fts2.def +i386-mingw32msvc-dllwrap \ + --def fts2.def -v --export-all \ + --driver-name i386-mingw32msvc-gcc \ + --dlltool-name i386-mingw32msvc-dlltool \ + --as i386-mingw32msvc-as \ + --target i386-mingw32 \ + -dllname fts2.dll -lmsvcrt fts2amal.o +zip fts2dll.zip fts2.dll fts2.def diff --git a/libraries/sqlite/unix/sqlite-3.5.1/mkopcodec.awk b/libraries/sqlite/unix/sqlite-3.5.1/mkopcodec.awk new file mode 100644 index 0000000000..ec80953009 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/mkopcodec.awk @@ -0,0 +1,31 @@ +#!/usr/bin/awk -f +# +# This AWK script scans the opcodes.h file (which is itself generated by +# another awk script) and uses the information gleaned to create the +# opcodes.c source file. +# +# Opcodes.c contains strings which are the symbolic names for the various +# opcodes used by the VDBE. These strings are used when disassembling a +# VDBE program during tracing or as a result of the EXPLAIN keyword. +# +BEGIN { + print "/* Automatically generated. Do not edit */" + print "/* See the mkopcodec.awk script for details. */" + printf "#if !defined(SQLITE_OMIT_EXPLAIN)" + printf " || !defined(NDEBUG)" + printf " || defined(VDBE_PROFILE)" + print " || defined(SQLITE_DEBUG)" + print "const char *sqlite3OpcodeName(int i){" + print " static const char *const azName[] = { \"?\"," +} +/define OP_/ { + sub("OP_","",$2) + i++ + printf " /* %3d */ \"%s\",\n", $3, $2 +} +END { + print " };" + print " return azName[i];" + print "}" + print "#endif" +} diff --git a/libraries/sqlite/unix/sqlite-3.5.1/mkopcodeh.awk b/libraries/sqlite/unix/sqlite-3.5.1/mkopcodeh.awk new file mode 100644 index 0000000000..c0874b1439 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/mkopcodeh.awk @@ -0,0 +1,127 @@ +#!/usr/bin/awk -f +# +# Generate the file opcodes.h. +# +# This AWK script scans a concatenation of the parse.h output file from the +# parser and the vdbe.c source file in order to generate the opcodes numbers +# for all opcodes. +# +# The lines of the vdbe.c that we are interested in are of the form: +# +# case OP_aaaa: /* same as TK_bbbbb */ +# +# The TK_ comment is optional. If it is present, then the value assigned to +# the OP_ is the same as the TK_ value. If missing, the OP_ value is assigned +# a small integer that is different from every other OP_ value. +# +# We go to the trouble of making some OP_ values the same as TK_ values +# as an optimization. During parsing, things like expression operators +# are coded with TK_ values such as TK_ADD, TK_DIVIDE, and so forth. Later +# during code generation, we need to generate corresponding opcodes like +# OP_Add and OP_Divide. By making TK_ADD==OP_Add and TK_DIVIDE==OP_Divide, +# code to translate from one to the other is avoided. 
This makes the +# code generator run (infinitesimally) faster and more importantly it makes +# the library footprint smaller. +# +# This script also scans for lines of the form: +# +# case OP_aaaa: /* no-push */ +# +# When the no-push comment is found on an opcode, it means that that +# opcode does not leave a result on the stack. By identifying which +# opcodes leave results on the stack it is possible to determine a +# much smaller upper bound on the size of the stack. This allows +# a smaller stack to be allocated, which is important to embedded +# systems with limited memory space. This script generates a series +# of "NOPUSH_MASK" defines that contain bitmaps of opcodes that leave +# results on the stack. The NOPUSH_MASK defines are used in vdbeaux.c +# to help determine the maximum stack size. +# + + +# Remember the TK_ values from the parse.h file +/^#define TK_/ { + tk[$2] = $3 +} + +# Scan for "case OP_aaaa:" lines in the vdbe.c file +/^case OP_/ { + name = $2 + sub(/:/,"",name) + sub("\r","",name) + op[name] = -1 + for(i=3; imax ) max = op[name] + printf "#define %-25s %15d", name, op[name] + if( sameas[op[name]] ) { + printf " /* same as %-12s*/", sameas[op[name]] + } + printf "\n" + + } + seenUnused = 0; + for(i=1; i $HOME/.rpmmacros +echo "%_topdir %{HOME}/rpm" >> $HOME/.rpmmacros +mkdir $HOME/rpm +mkdir $HOME/rpm/BUILD +mkdir $HOME/rpm/SOURCES +mkdir $HOME/rpm/RPMS +mkdir $HOME/rpm/SRPMS +mkdir $HOME/rpm/SPECS + +# create the spec file from the template +sed s/SQLITE_VERSION/$VERS/g $srcdir/spec.template > $HOME/rpm/SPECS/sqlite.spec + +# copy the source tarball to the rpm directory +cp doc/sqlite-$VERS.tar.gz $HOME/rpm/SOURCES/. + +# build all the rpms +rpm -ba $HOME/rpm/SPECS/sqlite.spec >& rpm-$vers.log + +# copy the RPMs into the build directory. +mv $HOME/rpm/RPMS/i386/sqlite*-$vers*.rpm doc +mv $HOME/rpm/SRPMS/sqlite-$vers*.rpm doc + +# Build the website +# +#cp $srcdir/../historical/* doc +make doc +cd doc +chmod 644 *.gz diff --git a/libraries/sqlite/unix/sqlite-3.5.1/spec.template b/libraries/sqlite/unix/sqlite-3.5.1/spec.template new file mode 100644 index 0000000000..24c5eaebb8 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/spec.template @@ -0,0 +1,62 @@ +%define name sqlite +%define version SQLITE_VERSION +%define release 1 + +Name: %{name} +Summary: SQLite is a C library that implements an embeddable SQL database engine +Version: %{version} +Release: %{release} +Source: %{name}-%{version}.tar.gz +Group: System/Libraries +URL: http://www.hwaci.com/sw/sqlite/ +License: Public Domain +BuildRoot: %{_tmppath}/%{name}-%{version}-root + +%description +SQLite is a C library that implements an embeddable SQL database engine. +Programs that link with the SQLite library can have SQL database access +without running a separate RDBMS process. The distribution comes with a +standalone command-line access program (sqlite) that can be used to +administer an SQLite database and which serves as an example of how to +use the SQLite library. + +%package -n %{name}-devel +Summary: Header files and libraries for developing apps which will use sqlite +Group: Development/C +Requires: %{name} = %{version}-%{release} + +%description -n %{name}-devel +The sqlite-devel package contains the header files and libraries needed +to develop programs that use the sqlite database library. 
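+
+# A typical build from this spec file, mirroring the invocation used by
+# publish.sh (paths are illustrative):
+#
+#   rpm -ba SPECS/sqlite.spec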
+ +%prep +%setup -q -n %{name} + +%build +CFLAGS="%optflags -DNDEBUG=1" CXXFLAGS="%optflags -DNDEBUG=1" ./configure --prefix=%{_prefix} + +make +make doc + +%install +install -d $RPM_BUILD_ROOT/%{_prefix} +install -d $RPM_BUILD_ROOT/%{_prefix}/bin +install -d $RPM_BUILD_ROOT/%{_prefix}/include +install -d $RPM_BUILD_ROOT/%{_prefix}/lib +make install prefix=$RPM_BUILD_ROOT/%{_prefix} + +%clean +rm -fr $RPM_BUILD_ROOT + +%files +%defattr(-, root, root) +%{_libdir}/*.so* +%{_bindir}/* + +%files -n %{name}-devel +%defattr(-, root, root) +%{_libdir}/pkgconfig/sqlite3.pc +%{_libdir}/*.a +%{_libdir}/*.la +%{_includedir}/* +%doc doc/* diff --git a/libraries/sqlite/unix/sqlite-3.5.1/sqlite.pc.in b/libraries/sqlite/unix/sqlite-3.5.1/sqlite.pc.in new file mode 100644 index 0000000000..16ed41c3d9 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/sqlite.pc.in @@ -0,0 +1,12 @@ +# Package Information for pkg-config + +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@ + +Name: SQLite +Description: SQL database engine +Version: @VERSION@ +Libs: -L${libdir} -lsqlite +Cflags: -I${includedir} diff --git a/libraries/sqlite/unix/sqlite-3.5.1/sqlite3.1 b/libraries/sqlite/unix/sqlite-3.5.1/sqlite3.1 new file mode 100644 index 0000000000..785995bf60 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/sqlite3.1 @@ -0,0 +1,229 @@ +.\" Hey, EMACS: -*- nroff -*- +.\" First parameter, NAME, should be all caps +.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection +.\" other parameters are allowed: see man(7), man(1) +.TH SQLITE3 1 "Mon Apr 15 23:49:17 2002" +.\" Please adjust this date whenever revising the manpage. +.\" +.\" Some roff macros, for reference: +.\" .nh disable hyphenation +.\" .hy enable hyphenation +.\" .ad l left justify +.\" .ad b justify to both left and right margins +.\" .nf disable filling +.\" .fi enable filling +.\" .br insert line break +.\" .sp insert n+1 empty lines +.\" for manpage-specific macros, see man(7) +.SH NAME +.B sqlite3 +\- A command line interface for SQLite version 3 + +.SH SYNOPSIS +.B sqlite3 +.RI [ options ] +.RI [ databasefile ] +.RI [ SQL ] + +.SH SUMMARY +.PP +.B sqlite3 +is a terminal-based front-end to the SQLite library that can evaluate +queries interactively and display the results in multiple formats. +.B sqlite3 +can also be used within shell scripts and other applications to provide +batch processing features. + +.SH DESCRIPTION +To start a +.B sqlite3 +interactive session, invoke the +.B sqlite3 +command and optionally provide the name of a database file. If the +database file does not exist, it will be created. If the database file +does exist, it will be opened. + +For example, to create a new database file named "mydata.db", create +a table named "memos" and insert a couple of records into that table: +.sp +$ +.B sqlite3 mydata.db +.br +SQLite version 3.1.3 +.br +Enter ".help" for instructions +.br +sqlite> +.B create table memos(text, priority INTEGER); +.br +sqlite> +.B insert into memos values('deliver project description', 10); +.br +sqlite> +.B insert into memos values('lunch with Christine', 100); +.br +sqlite> +.B select * from memos; +.br +deliver project description|10 +.br +lunch with Christine|100 +.br +sqlite> +.sp + +If no database name is supplied, the ATTACH sql command can be used +to attach to existing or create new database files. ATTACH can also +be used to attach to multiple databases within the same interactive +session. 
This is useful for migrating data between databases, +possibly changing the schema along the way. + +Optionally, a SQL statement or set of SQL statements can be supplied as +a single argument. Multiple statements should be separated by +semi-colons. + +For example: +.sp +$ +.B sqlite3 -line mydata.db 'select * from memos where priority > 20;' +.br + text = lunch with Christine +.br +priority = 100 +.br +.sp + +.SS SQLITE META-COMMANDS +.PP +The interactive interpreter offers a set of meta-commands that can be +used to control the output format, examine the currently attached +database files, or perform administrative operations upon the +attached databases (such as rebuilding indices). Meta-commands are +always prefixed with a dot (.). + +A list of available meta-commands can be viewed at any time by issuing +the '.help' command. For example: +.sp +sqlite> +.B .help +.nf +.cc | +.databases List names and files of attached databases +.dump ?TABLE? ... Dump the database in an SQL text format +.echo ON|OFF Turn command echo on or off +.exit Exit this program +.explain ON|OFF Turn output mode suitable for EXPLAIN on or off. +.header(s) ON|OFF Turn display of headers on or off +.help Show this message +.import FILE TABLE Import data from FILE into TABLE +.indices TABLE Show names of all indices on TABLE +.mode MODE ?TABLE? Set output mode where MODE is one of: + csv Comma-separated values + column Left-aligned columns. (See .width) + html HTML code + insert SQL insert statements for TABLE + line One value per line + list Values delimited by .separator string + tabs Tab-separated values + tcl TCL list elements +.nullvalue STRING Print STRING in place of NULL values +.output FILENAME Send output to FILENAME +.output stdout Send output to the screen +.prompt MAIN CONTINUE Replace the standard prompts +.quit Exit this program +.read FILENAME Execute SQL in FILENAME +.schema ?TABLE? Show the CREATE statements +.separator STRING Change separator used by output mode and .import +.show Show the current values for various settings +.tables ?PATTERN? List names of tables matching a LIKE pattern +.timeout MS Try opening locked tables for MS milliseconds +.width NUM NUM ... Set column widths for "column" mode +sqlite> +|cc . +.sp +.fi + +.SH OPTIONS +.B sqlite3 +has the following options: +.TP +.BI \-init\ file +Read and execute commands from +.I file +, which can contain a mix of SQL statements and meta-commands. +.TP +.B \-echo +Print commands before execution. +.TP +.B \-[no]header +Turn headers on or off. +.TP +.B \-column +Query results will be displayed in a table like form, using +whitespace characters to separate the columns and align the +output. +.TP +.B \-html +Query results will be output as simple HTML tables. +.TP +.B \-line +Query results will be displayed with one value per line, rows +separated by a blank line. Designed to be easily parsed by +scripts or other programs +.TP +.B \-list +Query results will be displayed with the separator (|, by default) +character between each field value. The default. +.TP +.BI \-separator\ separator +Set output field separator. Default is '|'. +.TP +.BI \-nullvalue\ string +Set string used to represent NULL values. Default is '' +(empty string). +.TP +.B \-version +Show SQLite version. +.TP +.B \-help +Show help on options and exit. + + +.SH INIT FILE +.B sqlite3 +reads an initialization file to set the configuration of the +interactive environment. Throughout initialization, any previously +specified setting can be overridden. 
The sequence of initialization is +as follows: + +o The default configuration is established as follows: + +.sp +.nf +.cc | +mode = LIST +separator = "|" +main prompt = "sqlite> " +continue prompt = " ...> " +|cc . +.sp +.fi + +o If the file +.B ~/.sqliterc +exists, it is processed first. +can be found in the user's home directory, it is +read and processed. It should generally only contain meta-commands. + +o If the -init option is present, the specified file is processed. + +o All other command line options are processed. + +.SH SEE ALSO +http://www.sqlite.org/ +.br +The sqlite-doc package +.SH AUTHOR +This manual page was originally written by Andreas Rottmann +, for the Debian GNU/Linux system (but may be used +by others). It was subsequently revised by Bill Bumgarner . diff --git a/libraries/sqlite/unix/sqlite-3.5.1/sqlite3.pc.in b/libraries/sqlite/unix/sqlite-3.5.1/sqlite3.pc.in new file mode 100644 index 0000000000..c14b5ba33a --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/sqlite3.pc.in @@ -0,0 +1,12 @@ +# Package Information for pkg-config + +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@ + +Name: SQLite +Description: SQL database engine +Version: @VERSION@ +Libs: -L${libdir} -lsqlite3 +Cflags: -I${includedir} diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/alter.c b/libraries/sqlite/unix/sqlite-3.5.1/src/alter.c new file mode 100644 index 0000000000..9d1ebd22cb --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/alter.c @@ -0,0 +1,622 @@ +/* +** 2005 February 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains C code routines that used to generate VDBE code +** that implements the ALTER TABLE command. +** +** $Id: alter.c,v 1.32 2007/08/29 14:06:23 danielk1977 Exp $ +*/ +#include "sqliteInt.h" +#include + +/* +** The code in this file only exists if we are not omitting the +** ALTER TABLE logic from the build. +*/ +#ifndef SQLITE_OMIT_ALTERTABLE + + +/* +** This function is used by SQL generated to implement the +** ALTER TABLE command. The first argument is the text of a CREATE TABLE or +** CREATE INDEX command. The second is a table name. The table name in +** the CREATE TABLE or CREATE INDEX statement is replaced with the third +** argument and the result returned. Examples: +** +** sqlite_rename_table('CREATE TABLE abc(a, b, c)', 'def') +** -> 'CREATE TABLE def(a, b, c)' +** +** sqlite_rename_table('CREATE INDEX i ON abc(a)', 'def') +** -> 'CREATE INDEX i ON def(a, b, c)' +*/ +static void renameTableFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + unsigned char const *zSql = sqlite3_value_text(argv[0]); + unsigned char const *zTableName = sqlite3_value_text(argv[1]); + + int token; + Token tname; + unsigned char const *zCsr = zSql; + int len = 0; + char *zRet; + + sqlite3 *db = sqlite3_user_data(context); + + /* The principle used to locate the table name in the CREATE TABLE + ** statement is that the table name is the first token that is immediatedly + ** followed by a left parenthesis - TK_LP - or "USING" TK_USING. + */ + if( zSql ){ + do { + if( !*zCsr ){ + /* Ran out of input before finding an opening bracket. Return NULL. 
*/ + return; + } + + /* Store the token that zCsr points to in tname. */ + tname.z = zCsr; + tname.n = len; + + /* Advance zCsr to the next token. Store that token type in 'token', + ** and it's length in 'len' (to be used next iteration of this loop). + */ + do { + zCsr += len; + len = sqlite3GetToken(zCsr, &token); + } while( token==TK_SPACE ); + assert( len>0 ); + } while( token!=TK_LP && token!=TK_USING ); + + zRet = sqlite3MPrintf(db, "%.*s%Q%s", tname.z - zSql, zSql, + zTableName, tname.z+tname.n); + sqlite3_result_text(context, zRet, -1, sqlite3_free); + } +} + +#ifndef SQLITE_OMIT_TRIGGER +/* This function is used by SQL generated to implement the +** ALTER TABLE command. The first argument is the text of a CREATE TRIGGER +** statement. The second is a table name. The table name in the CREATE +** TRIGGER statement is replaced with the third argument and the result +** returned. This is analagous to renameTableFunc() above, except for CREATE +** TRIGGER, not CREATE INDEX and CREATE TABLE. +*/ +static void renameTriggerFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + unsigned char const *zSql = sqlite3_value_text(argv[0]); + unsigned char const *zTableName = sqlite3_value_text(argv[1]); + + int token; + Token tname; + int dist = 3; + unsigned char const *zCsr = zSql; + int len = 0; + char *zRet; + + sqlite3 *db = sqlite3_user_data(context); + + /* The principle used to locate the table name in the CREATE TRIGGER + ** statement is that the table name is the first token that is immediatedly + ** preceded by either TK_ON or TK_DOT and immediatedly followed by one + ** of TK_WHEN, TK_BEGIN or TK_FOR. + */ + if( zSql ){ + do { + + if( !*zCsr ){ + /* Ran out of input before finding the table name. Return NULL. */ + return; + } + + /* Store the token that zCsr points to in tname. */ + tname.z = zCsr; + tname.n = len; + + /* Advance zCsr to the next token. Store that token type in 'token', + ** and it's length in 'len' (to be used next iteration of this loop). + */ + do { + zCsr += len; + len = sqlite3GetToken(zCsr, &token); + }while( token==TK_SPACE ); + assert( len>0 ); + + /* Variable 'dist' stores the number of tokens read since the most + ** recent TK_DOT or TK_ON. This means that when a WHEN, FOR or BEGIN + ** token is read and 'dist' equals 2, the condition stated above + ** to be met. + ** + ** Note that ON cannot be a database, table or column name, so + ** there is no need to worry about syntax like + ** "CREATE TRIGGER ... ON ON.ON BEGIN ..." etc. + */ + dist++; + if( token==TK_DOT || token==TK_ON ){ + dist = 0; + } + } while( dist!=2 || (token!=TK_WHEN && token!=TK_FOR && token!=TK_BEGIN) ); + + /* Variable tname now contains the token that is the old table-name + ** in the CREATE TRIGGER statement. 
+ */ + zRet = sqlite3MPrintf(db, "%.*s%Q%s", tname.z - zSql, zSql, + zTableName, tname.z+tname.n); + sqlite3_result_text(context, zRet, -1, sqlite3_free); + } +} +#endif /* !SQLITE_OMIT_TRIGGER */ + +/* +** Register built-in functions used to help implement ALTER TABLE +*/ +void sqlite3AlterFunctions(sqlite3 *db){ + static const struct { + char *zName; + signed char nArg; + void (*xFunc)(sqlite3_context*,int,sqlite3_value **); + } aFuncs[] = { + { "sqlite_rename_table", 2, renameTableFunc}, +#ifndef SQLITE_OMIT_TRIGGER + { "sqlite_rename_trigger", 2, renameTriggerFunc}, +#endif + }; + int i; + + for(i=0; idb->aDb[1].pSchema; /* Temp db schema */ + + /* If the table is not located in the temp-db (in which case NULL is + ** returned, loop through the tables list of triggers. For each trigger + ** that is not part of the temp-db schema, add a clause to the WHERE + ** expression being built up in zWhere. + */ + if( pTab->pSchema!=pTempSchema ){ + sqlite3 *db = pParse->db; + for( pTrig=pTab->pTrigger; pTrig; pTrig=pTrig->pNext ){ + if( pTrig->pSchema==pTempSchema ){ + if( !zWhere ){ + zWhere = sqlite3MPrintf(db, "name=%Q", pTrig->name); + }else{ + tmp = zWhere; + zWhere = sqlite3MPrintf(db, "%s OR name=%Q", zWhere, pTrig->name); + sqlite3_free(tmp); + } + } + } + } + return zWhere; +} + +/* +** Generate code to drop and reload the internal representation of table +** pTab from the database, including triggers and temporary triggers. +** Argument zName is the name of the table in the database schema at +** the time the generated code is executed. This can be different from +** pTab->zName if this function is being called to code part of an +** "ALTER TABLE RENAME TO" statement. +*/ +static void reloadTableSchema(Parse *pParse, Table *pTab, const char *zName){ + Vdbe *v; + char *zWhere; + int iDb; /* Index of database containing pTab */ +#ifndef SQLITE_OMIT_TRIGGER + Trigger *pTrig; +#endif + + v = sqlite3GetVdbe(pParse); + if( !v ) return; + assert( sqlite3BtreeHoldsAllMutexes(pParse->db) ); + iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); + assert( iDb>=0 ); + +#ifndef SQLITE_OMIT_TRIGGER + /* Drop any table triggers from the internal schema. */ + for(pTrig=pTab->pTrigger; pTrig; pTrig=pTrig->pNext){ + int iTrigDb = sqlite3SchemaToIndex(pParse->db, pTrig->pSchema); + assert( iTrigDb==iDb || iTrigDb==1 ); + sqlite3VdbeOp3(v, OP_DropTrigger, iTrigDb, 0, pTrig->name, 0); + } +#endif + + /* Drop the table and index from the internal schema */ + sqlite3VdbeOp3(v, OP_DropTable, iDb, 0, pTab->zName, 0); + + /* Reload the table, index and permanent trigger schemas. */ + zWhere = sqlite3MPrintf(pParse->db, "tbl_name=%Q", zName); + if( !zWhere ) return; + sqlite3VdbeOp3(v, OP_ParseSchema, iDb, 0, zWhere, P3_DYNAMIC); + +#ifndef SQLITE_OMIT_TRIGGER + /* Now, if the table is not stored in the temp database, reload any temp + ** triggers. Don't use IN(...) in case SQLITE_OMIT_SUBQUERY is defined. + */ + if( (zWhere=whereTempTriggers(pParse, pTab))!=0 ){ + sqlite3VdbeOp3(v, OP_ParseSchema, 1, 0, zWhere, P3_DYNAMIC); + } +#endif +} + +/* +** Generate code to implement the "ALTER TABLE xxx RENAME TO yyy" +** command. +*/ +void sqlite3AlterRenameTable( + Parse *pParse, /* Parser context. */ + SrcList *pSrc, /* The table to rename. */ + Token *pName /* The new table name. 
*/ +){ + int iDb; /* Database that contains the table */ + char *zDb; /* Name of database iDb */ + Table *pTab; /* Table being renamed */ + char *zName = 0; /* NULL-terminated version of pName */ + sqlite3 *db = pParse->db; /* Database connection */ + int nTabName; /* Number of UTF-8 characters in zTabName */ + const char *zTabName; /* Original name of the table */ + Vdbe *v; +#ifndef SQLITE_OMIT_TRIGGER + char *zWhere = 0; /* Where clause to locate temp triggers */ +#endif + int isVirtualRename = 0; /* True if this is a v-table with an xRename() */ + + if( db->mallocFailed ) goto exit_rename_table; + assert( pSrc->nSrc==1 ); + assert( sqlite3BtreeHoldsAllMutexes(pParse->db) ); + + pTab = sqlite3LocateTable(pParse, pSrc->a[0].zName, pSrc->a[0].zDatabase); + if( !pTab ) goto exit_rename_table; + iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); + zDb = db->aDb[iDb].zName; + + /* Get a NULL terminated version of the new table name. */ + zName = sqlite3NameFromToken(db, pName); + if( !zName ) goto exit_rename_table; + + /* Check that a table or index named 'zName' does not already exist + ** in database iDb. If so, this is an error. + */ + if( sqlite3FindTable(db, zName, zDb) || sqlite3FindIndex(db, zName, zDb) ){ + sqlite3ErrorMsg(pParse, + "there is already another table or index with this name: %s", zName); + goto exit_rename_table; + } + + /* Make sure it is not a system table being altered, or a reserved name + ** that the table is being renamed to. + */ + if( strlen(pTab->zName)>6 && 0==sqlite3StrNICmp(pTab->zName, "sqlite_", 7) ){ + sqlite3ErrorMsg(pParse, "table %s may not be altered", pTab->zName); + goto exit_rename_table; + } + if( SQLITE_OK!=sqlite3CheckObjectName(pParse, zName) ){ + goto exit_rename_table; + } + +#ifndef SQLITE_OMIT_AUTHORIZATION + /* Invoke the authorization callback. */ + if( sqlite3AuthCheck(pParse, SQLITE_ALTER_TABLE, zDb, pTab->zName, 0) ){ + goto exit_rename_table; + } +#endif + +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( sqlite3ViewGetColumnNames(pParse, pTab) ){ + goto exit_rename_table; + } + if( IsVirtual(pTab) && pTab->pMod->pModule->xRename ){ + isVirtualRename = 1; + } +#endif + + /* Begin a transaction and code the VerifyCookie for database iDb. + ** Then modify the schema cookie (since the ALTER TABLE modifies the + ** schema). Open a statement transaction if the table is a virtual + ** table. + */ + v = sqlite3GetVdbe(pParse); + if( v==0 ){ + goto exit_rename_table; + } + sqlite3BeginWriteOperation(pParse, isVirtualRename, iDb); + sqlite3ChangeCookie(db, v, iDb); + + /* If this is a virtual table, invoke the xRename() function if + ** one is defined. The xRename() callback will modify the names + ** of any resources used by the v-table implementation (including other + ** SQLite tables) that are identified by the name of the virtual table. + */ +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( isVirtualRename ){ + sqlite3VdbeOp3(v, OP_String8, 0, 0, zName, 0); + sqlite3VdbeOp3(v, OP_VRename, 0, 0, (const char*)pTab->pVtab, P3_VTAB); + } +#endif + + /* figure out how many UTF-8 characters are in zName */ + zTabName = pTab->zName; + nTabName = sqlite3Utf8CharLen(zTabName, -1); + + /* Modify the sqlite_master table to use the new table name. 
*/ + sqlite3NestedParse(pParse, + "UPDATE %Q.%s SET " +#ifdef SQLITE_OMIT_TRIGGER + "sql = sqlite_rename_table(sql, %Q), " +#else + "sql = CASE " + "WHEN type = 'trigger' THEN sqlite_rename_trigger(sql, %Q)" + "ELSE sqlite_rename_table(sql, %Q) END, " +#endif + "tbl_name = %Q, " + "name = CASE " + "WHEN type='table' THEN %Q " + "WHEN name LIKE 'sqlite_autoindex%%' AND type='index' THEN " + "'sqlite_autoindex_' || %Q || substr(name,%d+18,10) " + "ELSE name END " + "WHERE tbl_name=%Q AND " + "(type='table' OR type='index' OR type='trigger');", + zDb, SCHEMA_TABLE(iDb), zName, zName, zName, +#ifndef SQLITE_OMIT_TRIGGER + zName, +#endif + zName, nTabName, zTabName + ); + +#ifndef SQLITE_OMIT_AUTOINCREMENT + /* If the sqlite_sequence table exists in this database, then update + ** it with the new table name. + */ + if( sqlite3FindTable(db, "sqlite_sequence", zDb) ){ + sqlite3NestedParse(pParse, + "UPDATE %Q.sqlite_sequence set name = %Q WHERE name = %Q", + zDb, zName, pTab->zName); + } +#endif + +#ifndef SQLITE_OMIT_TRIGGER + /* If there are TEMP triggers on this table, modify the sqlite_temp_master + ** table. Don't do this if the table being ALTERed is itself located in + ** the temp database. + */ + if( (zWhere=whereTempTriggers(pParse, pTab))!=0 ){ + sqlite3NestedParse(pParse, + "UPDATE sqlite_temp_master SET " + "sql = sqlite_rename_trigger(sql, %Q), " + "tbl_name = %Q " + "WHERE %s;", zName, zName, zWhere); + sqlite3_free(zWhere); + } +#endif + + /* Drop and reload the internal table schema. */ + reloadTableSchema(pParse, pTab, zName); + +exit_rename_table: + sqlite3SrcListDelete(pSrc); + sqlite3_free(zName); +} + + +/* +** This function is called after an "ALTER TABLE ... ADD" statement +** has been parsed. Argument pColDef contains the text of the new +** column definition. +** +** The Table structure pParse->pNewTable was extended to include +** the new column during parsing. +*/ +void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){ + Table *pNew; /* Copy of pParse->pNewTable */ + Table *pTab; /* Table being altered */ + int iDb; /* Database number */ + const char *zDb; /* Database name */ + const char *zTab; /* Table name */ + char *zCol; /* Null-terminated column definition */ + Column *pCol; /* The new column */ + Expr *pDflt; /* Default value for the new column */ + sqlite3 *db; /* The database connection; */ + + if( pParse->nErr ) return; + pNew = pParse->pNewTable; + assert( pNew ); + + db = pParse->db; + assert( sqlite3BtreeHoldsAllMutexes(db) ); + iDb = sqlite3SchemaToIndex(db, pNew->pSchema); + zDb = db->aDb[iDb].zName; + zTab = pNew->zName; + pCol = &pNew->aCol[pNew->nCol-1]; + pDflt = pCol->pDflt; + pTab = sqlite3FindTable(db, zTab, zDb); + assert( pTab ); + +#ifndef SQLITE_OMIT_AUTHORIZATION + /* Invoke the authorization callback. */ + if( sqlite3AuthCheck(pParse, SQLITE_ALTER_TABLE, zDb, pTab->zName, 0) ){ + return; + } +#endif + + /* If the default value for the new column was specified with a + ** literal NULL, then set pDflt to 0. This simplifies checking + ** for an SQL NULL default below. + */ + if( pDflt && pDflt->op==TK_NULL ){ + pDflt = 0; + } + + /* Check that the new column is not specified as PRIMARY KEY or UNIQUE. + ** If there is a NOT NULL constraint, then the default value for the + ** column must not be NULL. 
+ */ + if( pCol->isPrimKey ){ + sqlite3ErrorMsg(pParse, "Cannot add a PRIMARY KEY column"); + return; + } + if( pNew->pIndex ){ + sqlite3ErrorMsg(pParse, "Cannot add a UNIQUE column"); + return; + } + if( pCol->notNull && !pDflt ){ + sqlite3ErrorMsg(pParse, + "Cannot add a NOT NULL column with default value NULL"); + return; + } + + /* Ensure the default expression is something that sqlite3ValueFromExpr() + ** can handle (i.e. not CURRENT_TIME etc.) + */ + if( pDflt ){ + sqlite3_value *pVal; + if( sqlite3ValueFromExpr(db, pDflt, SQLITE_UTF8, SQLITE_AFF_NONE, &pVal) ){ + db->mallocFailed = 1; + return; + } + if( !pVal ){ + sqlite3ErrorMsg(pParse, "Cannot add a column with non-constant default"); + return; + } + sqlite3ValueFree(pVal); + } + + /* Modify the CREATE TABLE statement. */ + zCol = sqlite3DbStrNDup(db, (char*)pColDef->z, pColDef->n); + if( zCol ){ + char *zEnd = &zCol[pColDef->n-1]; + while( (zEnd>zCol && *zEnd==';') || isspace(*(unsigned char *)zEnd) ){ + *zEnd-- = '\0'; + } + sqlite3NestedParse(pParse, + "UPDATE %Q.%s SET " + "sql = substr(sql,1,%d) || ', ' || %Q || substr(sql,%d,length(sql)) " + "WHERE type = 'table' AND name = %Q", + zDb, SCHEMA_TABLE(iDb), pNew->addColOffset, zCol, pNew->addColOffset+1, + zTab + ); + sqlite3_free(zCol); + } + + /* If the default value of the new column is NULL, then set the file + ** format to 2. If the default value of the new column is not NULL, + ** the file format becomes 3. + */ + sqlite3MinimumFileFormat(pParse, iDb, pDflt ? 3 : 2); + + /* Reload the schema of the modified table. */ + reloadTableSchema(pParse, pTab, pTab->zName); +} + +/* +** This function is called by the parser after the table-name in +** an "ALTER TABLE ADD" statement is parsed. Argument +** pSrc is the full-name of the table being altered. +** +** This routine makes a (partial) copy of the Table structure +** for the table being altered and sets Parse.pNewTable to point +** to it. Routines called by the parser as the column definition +** is parsed (i.e. sqlite3AddColumn()) add the new Column data to +** the copy. The copy of the Table structure is deleted by tokenize.c +** after parsing is finished. +** +** Routine sqlite3AlterFinishAddColumn() will be called to complete +** coding the "ALTER TABLE ... ADD" statement. +*/ +void sqlite3AlterBeginAddColumn(Parse *pParse, SrcList *pSrc){ + Table *pNew; + Table *pTab; + Vdbe *v; + int iDb; + int i; + int nAlloc; + sqlite3 *db = pParse->db; + + /* Look up the table being altered. */ + assert( pParse->pNewTable==0 ); + assert( sqlite3BtreeHoldsAllMutexes(db) ); + if( db->mallocFailed ) goto exit_begin_add_column; + pTab = sqlite3LocateTable(pParse, pSrc->a[0].zName, pSrc->a[0].zDatabase); + if( !pTab ) goto exit_begin_add_column; + +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( IsVirtual(pTab) ){ + sqlite3ErrorMsg(pParse, "virtual tables may not be altered"); + goto exit_begin_add_column; + } +#endif + + /* Make sure this is not an attempt to ALTER a view. */ + if( pTab->pSelect ){ + sqlite3ErrorMsg(pParse, "Cannot add a column to a view"); + goto exit_begin_add_column; + } + + assert( pTab->addColOffset>0 ); + iDb = sqlite3SchemaToIndex(db, pTab->pSchema); + + /* Put a copy of the Table struct in Parse.pNewTable for the + ** sqlite3AddColumn() function and friends to modify. 
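**
** (An illustrative note, not part of the original comment: for a statement
** such as "ALTER TABLE t1 ADD COLUMN c", this routine runs as soon as the
** parser has seen "ALTER TABLE t1 ADD", and sqlite3AlterFinishAddColumn()
** above runs once the text of the new column definition is available,
** splicing it into the stored schema text so that 'CREATE TABLE t1(a,b)'
** becomes 'CREATE TABLE t1(a,b, c)'.)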
+ */ + pNew = (Table*)sqlite3DbMallocZero(db, sizeof(Table)); + if( !pNew ) goto exit_begin_add_column; + pParse->pNewTable = pNew; + pNew->nRef = 1; + pNew->nCol = pTab->nCol; + assert( pNew->nCol>0 ); + nAlloc = (((pNew->nCol-1)/8)*8)+8; + assert( nAlloc>=pNew->nCol && nAlloc%8==0 && nAlloc-pNew->nCol<8 ); + pNew->aCol = (Column*)sqlite3DbMallocZero(db, sizeof(Column)*nAlloc); + pNew->zName = sqlite3DbStrDup(db, pTab->zName); + if( !pNew->aCol || !pNew->zName ){ + db->mallocFailed = 1; + goto exit_begin_add_column; + } + memcpy(pNew->aCol, pTab->aCol, sizeof(Column)*pNew->nCol); + for(i=0; inCol; i++){ + Column *pCol = &pNew->aCol[i]; + pCol->zName = sqlite3DbStrDup(db, pCol->zName); + pCol->zColl = 0; + pCol->zType = 0; + pCol->pDflt = 0; + } + pNew->pSchema = db->aDb[iDb].pSchema; + pNew->addColOffset = pTab->addColOffset; + pNew->nRef = 1; + + /* Begin a transaction and increment the schema cookie. */ + sqlite3BeginWriteOperation(pParse, 0, iDb); + v = sqlite3GetVdbe(pParse); + if( !v ) goto exit_begin_add_column; + sqlite3ChangeCookie(db, v, iDb); + +exit_begin_add_column: + sqlite3SrcListDelete(pSrc); + return; +} +#endif /* SQLITE_ALTER_TABLE */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/analyze.c b/libraries/sqlite/unix/sqlite-3.5.1/src/analyze.c new file mode 100644 index 0000000000..119e7f6484 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/analyze.c @@ -0,0 +1,418 @@ +/* +** 2005 July 8 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains code associated with the ANALYZE command. +** +** @(#) $Id: analyze.c,v 1.23 2007/08/29 17:43:20 drh Exp $ +*/ +#ifndef SQLITE_OMIT_ANALYZE +#include "sqliteInt.h" + +/* +** This routine generates code that opens the sqlite_stat1 table on cursor +** iStatCur. +** +** If the sqlite_stat1 tables does not previously exist, it is created. +** If it does previously exist, all entires associated with table zWhere +** are removed. If zWhere==0 then all entries are removed. +*/ +static void openStatTable( + Parse *pParse, /* Parsing context */ + int iDb, /* The database we are looking in */ + int iStatCur, /* Open the sqlite_stat1 table on this cursor */ + const char *zWhere /* Delete entries associated with this table */ +){ + sqlite3 *db = pParse->db; + Db *pDb; + int iRootPage; + Table *pStat; + Vdbe *v = sqlite3GetVdbe(pParse); + + if( v==0 ) return; + assert( sqlite3BtreeHoldsAllMutexes(db) ); + assert( sqlite3VdbeDb(v)==db ); + pDb = &db->aDb[iDb]; + if( (pStat = sqlite3FindTable(db, "sqlite_stat1", pDb->zName))==0 ){ + /* The sqlite_stat1 tables does not exist. Create it. + ** Note that a side-effect of the CREATE TABLE statement is to leave + ** the rootpage of the new table on the top of the stack. This is + ** important because the OpenWrite opcode below will be needing it. */ + sqlite3NestedParse(pParse, + "CREATE TABLE %Q.sqlite_stat1(tbl,idx,stat)", + pDb->zName + ); + iRootPage = 0; /* Cause rootpage to be taken from top of stack */ + }else if( zWhere ){ + /* The sqlite_stat1 table exists. Delete all entries associated with + ** the table zWhere. 
*/ + sqlite3NestedParse(pParse, + "DELETE FROM %Q.sqlite_stat1 WHERE tbl=%Q", + pDb->zName, zWhere + ); + iRootPage = pStat->tnum; + }else{ + /* The sqlite_stat1 table already exists. Delete all rows. */ + iRootPage = pStat->tnum; + sqlite3VdbeAddOp(v, OP_Clear, pStat->tnum, iDb); + } + + /* Open the sqlite_stat1 table for writing. Unless it was created + ** by this vdbe program, lock it for writing at the shared-cache level. + ** If this vdbe did create the sqlite_stat1 table, then it must have + ** already obtained a schema-lock, making the write-lock redundant. + */ + if( iRootPage>0 ){ + sqlite3TableLock(pParse, iDb, iRootPage, 1, "sqlite_stat1"); + } + sqlite3VdbeAddOp(v, OP_Integer, iDb, 0); + sqlite3VdbeAddOp(v, OP_OpenWrite, iStatCur, iRootPage); + sqlite3VdbeAddOp(v, OP_SetNumColumns, iStatCur, 3); +} + +/* +** Generate code to do an analysis of all indices associated with +** a single table. +*/ +static void analyzeOneTable( + Parse *pParse, /* Parser context */ + Table *pTab, /* Table whose indices are to be analyzed */ + int iStatCur, /* Cursor that writes to the sqlite_stat1 table */ + int iMem /* Available memory locations begin here */ +){ + Index *pIdx; /* An index to being analyzed */ + int iIdxCur; /* Cursor number for index being analyzed */ + int nCol; /* Number of columns in the index */ + Vdbe *v; /* The virtual machine being built up */ + int i; /* Loop counter */ + int topOfLoop; /* The top of the loop */ + int endOfLoop; /* The end of the loop */ + int addr; /* The address of an instruction */ + int iDb; /* Index of database containing pTab */ + + v = sqlite3GetVdbe(pParse); + if( v==0 || pTab==0 || pTab->pIndex==0 ){ + /* Do no analysis for tables that have no indices */ + return; + } + assert( sqlite3BtreeHoldsAllMutexes(pParse->db) ); + iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); + assert( iDb>=0 ); +#ifndef SQLITE_OMIT_AUTHORIZATION + if( sqlite3AuthCheck(pParse, SQLITE_ANALYZE, pTab->zName, 0, + pParse->db->aDb[iDb].zName ) ){ + return; + } +#endif + + /* Establish a read-lock on the table at the shared-cache level. */ + sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); + + iIdxCur = pParse->nTab; + for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + KeyInfo *pKey = sqlite3IndexKeyinfo(pParse, pIdx); + + /* Open a cursor to the index to be analyzed + */ + assert( iDb==sqlite3SchemaToIndex(pParse->db, pIdx->pSchema) ); + sqlite3VdbeAddOp(v, OP_Integer, iDb, 0); + VdbeComment((v, "# %s", pIdx->zName)); + sqlite3VdbeOp3(v, OP_OpenRead, iIdxCur, pIdx->tnum, + (char *)pKey, P3_KEYINFO_HANDOFF); + nCol = pIdx->nColumn; + if( iMem+nCol*2>=pParse->nMem ){ + pParse->nMem = iMem+nCol*2+1; + } + sqlite3VdbeAddOp(v, OP_SetNumColumns, iIdxCur, nCol+1); + + /* Memory cells are used as follows: + ** + ** mem[iMem]: The total number of rows in the table. + ** mem[iMem+1]: Number of distinct values in column 1 + ** ... + ** mem[iMem+nCol]: Number of distinct values in column N + ** mem[iMem+nCol+1] Last observed value of column 1 + ** ... + ** mem[iMem+nCol+nCol]: Last observed value of column N + ** + ** Cells iMem through iMem+nCol are initialized to 0. The others + ** are initialized to NULL. + */ + for(i=0; i<=nCol; i++){ + sqlite3VdbeAddOp(v, OP_MemInt, 0, iMem+i); + } + for(i=0; i0 then it is always the case the D>0 so division by zero + ** is never possible. 
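**
** As a concrete illustration (not part of the original comment, and the
** numbers are made up): for an index i1 on emp(dept, name), the row that
** the code below writes into sqlite_stat1 might look like
**
**     tbl='emp', idx='i1', stat='10000 250 1'
**
** meaning the index holds roughly 10000 entries, that about 250 of them
** share any one value of dept, and that a (dept, name) pair narrows the
** search down to a single entry.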
+ */ + sqlite3VdbeAddOp(v, OP_MemLoad, iMem, 0); + addr = sqlite3VdbeAddOp(v, OP_IfNot, 0, 0); + sqlite3VdbeAddOp(v, OP_NewRowid, iStatCur, 0); + sqlite3VdbeOp3(v, OP_String8, 0, 0, pTab->zName, 0); + sqlite3VdbeOp3(v, OP_String8, 0, 0, pIdx->zName, 0); + sqlite3VdbeAddOp(v, OP_MemLoad, iMem, 0); + sqlite3VdbeOp3(v, OP_String8, 0, 0, " ", 0); + for(i=0; idb; + Schema *pSchema = db->aDb[iDb].pSchema; /* Schema of database iDb */ + HashElem *k; + int iStatCur; + int iMem; + + sqlite3BeginWriteOperation(pParse, 0, iDb); + iStatCur = pParse->nTab++; + openStatTable(pParse, iDb, iStatCur, 0); + iMem = pParse->nMem; + for(k=sqliteHashFirst(&pSchema->tblHash); k; k=sqliteHashNext(k)){ + Table *pTab = (Table*)sqliteHashData(k); + analyzeOneTable(pParse, pTab, iStatCur, iMem); + } + loadAnalysis(pParse, iDb); +} + +/* +** Generate code that will do an analysis of a single table in +** a database. +*/ +static void analyzeTable(Parse *pParse, Table *pTab){ + int iDb; + int iStatCur; + + assert( pTab!=0 ); + assert( sqlite3BtreeHoldsAllMutexes(pParse->db) ); + iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); + sqlite3BeginWriteOperation(pParse, 0, iDb); + iStatCur = pParse->nTab++; + openStatTable(pParse, iDb, iStatCur, pTab->zName); + analyzeOneTable(pParse, pTab, iStatCur, pParse->nMem); + loadAnalysis(pParse, iDb); +} + +/* +** Generate code for the ANALYZE command. The parser calls this routine +** when it recognizes an ANALYZE command. +** +** ANALYZE -- 1 +** ANALYZE -- 2 +** ANALYZE ?.? -- 3 +** +** Form 1 causes all indices in all attached databases to be analyzed. +** Form 2 analyzes all indices the single database named. +** Form 3 analyzes all indices associated with the named table. +*/ +void sqlite3Analyze(Parse *pParse, Token *pName1, Token *pName2){ + sqlite3 *db = pParse->db; + int iDb; + int i; + char *z, *zDb; + Table *pTab; + Token *pTableName; + + /* Read the database schema. If an error occurs, leave an error message + ** and code in pParse and return NULL. */ + assert( sqlite3BtreeHoldsAllMutexes(pParse->db) ); + if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ + return; + } + + if( pName1==0 ){ + /* Form 1: Analyze everything */ + for(i=0; inDb; i++){ + if( i==1 ) continue; /* Do not analyze the TEMP database */ + analyzeDatabase(pParse, i); + } + }else if( pName2==0 || pName2->n==0 ){ + /* Form 2: Analyze the database or table named */ + iDb = sqlite3FindDb(db, pName1); + if( iDb>=0 ){ + analyzeDatabase(pParse, iDb); + }else{ + z = sqlite3NameFromToken(db, pName1); + pTab = sqlite3LocateTable(pParse, z, 0); + sqlite3_free(z); + if( pTab ){ + analyzeTable(pParse, pTab); + } + } + }else{ + /* Form 3: Analyze the fully qualified table name */ + iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pTableName); + if( iDb>=0 ){ + zDb = db->aDb[iDb].zName; + z = sqlite3NameFromToken(db, pTableName); + if( z ){ + pTab = sqlite3LocateTable(pParse, z, zDb); + sqlite3_free(z); + if( pTab ){ + analyzeTable(pParse, pTab); + } + } + } + } +} + +/* +** Used to pass information from the analyzer reader through to the +** callback routine. +*/ +typedef struct analysisInfo analysisInfo; +struct analysisInfo { + sqlite3 *db; + const char *zDatabase; +}; + +/* +** This callback is invoked once for each index when reading the +** sqlite_stat1 table. 
+** +** argv[0] = name of the index +** argv[1] = results of analysis - on integer for each column +*/ +static int analysisLoader(void *pData, int argc, char **argv, char **azNotUsed){ + analysisInfo *pInfo = (analysisInfo*)pData; + Index *pIndex; + int i, c; + unsigned int v; + const char *z; + + assert( argc==2 ); + if( argv==0 || argv[0]==0 || argv[1]==0 ){ + return 0; + } + pIndex = sqlite3FindIndex(pInfo->db, argv[0], pInfo->zDatabase); + if( pIndex==0 ){ + return 0; + } + z = argv[1]; + for(i=0; *z && i<=pIndex->nColumn; i++){ + v = 0; + while( (c=z[0])>='0' && c<='9' ){ + v = v*10 + c - '0'; + z++; + } + pIndex->aiRowEst[i] = v; + if( *z==' ' ) z++; + } + return 0; +} + +/* +** Load the content of the sqlite_stat1 table into the index hash tables. +*/ +int sqlite3AnalysisLoad(sqlite3 *db, int iDb){ + analysisInfo sInfo; + HashElem *i; + char *zSql; + int rc; + + assert( iDb>=0 && iDbnDb ); + assert( db->aDb[iDb].pBt!=0 ); + assert( sqlite3BtreeHoldsMutex(db->aDb[iDb].pBt) ); + + /* Clear any prior statistics */ + for(i=sqliteHashFirst(&db->aDb[iDb].pSchema->idxHash);i;i=sqliteHashNext(i)){ + Index *pIdx = sqliteHashData(i); + sqlite3DefaultRowEst(pIdx); + } + + /* Check to make sure the sqlite_stat1 table existss */ + sInfo.db = db; + sInfo.zDatabase = db->aDb[iDb].zName; + if( sqlite3FindTable(db, "sqlite_stat1", sInfo.zDatabase)==0 ){ + return SQLITE_ERROR; + } + + + /* Load new statistics out of the sqlite_stat1 table */ + zSql = sqlite3MPrintf(db, "SELECT idx, stat FROM %Q.sqlite_stat1", + sInfo.zDatabase); + sqlite3SafetyOff(db); + rc = sqlite3_exec(db, zSql, analysisLoader, &sInfo, 0); + sqlite3SafetyOn(db); + sqlite3_free(zSql); + return rc; +} + + +#endif /* SQLITE_OMIT_ANALYZE */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/attach.c b/libraries/sqlite/unix/sqlite-3.5.1/src/attach.c new file mode 100644 index 0000000000..2fb950fd4a --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/attach.c @@ -0,0 +1,521 @@ +/* +** 2003 April 6 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains code used to implement the ATTACH and DETACH commands. +** +** $Id: attach.c,v 1.63 2007/10/03 08:46:44 danielk1977 Exp $ +*/ +#include "sqliteInt.h" + +#ifndef SQLITE_OMIT_ATTACH +/* +** Resolve an expression that was part of an ATTACH or DETACH statement. This +** is slightly different from resolving a normal SQL expression, because simple +** identifiers are treated as strings, not possible column names or aliases. +** +** i.e. if the parser sees: +** +** ATTACH DATABASE abc AS def +** +** it treats the two expressions as literal strings 'abc' and 'def' instead of +** looking for columns of the same name. +** +** This only applies to the root node of pExpr, so the statement: +** +** ATTACH DATABASE abc||def AS 'db2' +** +** will fail because neither abc or def can be resolved. 
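**
** The following sketch is added for illustration only; it is not part of
** the original file and the database names are placeholders. It shows the
** two forms an application can use through the public API: a bare
** identifier, which is taken as a literal filename, and a quoted string.
*/
static int exampleAttach(sqlite3 *db){
  int rc;
  /* The bare identifier "backup" attaches a file literally named "backup" */
  rc = sqlite3_exec(db, "ATTACH DATABASE backup AS b1", 0, 0, 0);
  if( rc==SQLITE_OK ){
    /* Equivalent form using an explicit string literal for the filename */
    rc = sqlite3_exec(db, "ATTACH DATABASE 'backup2.db' AS b2", 0, 0, 0);
  }
  return rc;
}
/*
** resolveAttachExpr() below implements the identifier-as-string rule
** described above.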
+*/ +static int resolveAttachExpr(NameContext *pName, Expr *pExpr) +{ + int rc = SQLITE_OK; + if( pExpr ){ + if( pExpr->op!=TK_ID ){ + rc = sqlite3ExprResolveNames(pName, pExpr); + if( rc==SQLITE_OK && !sqlite3ExprIsConstant(pExpr) ){ + sqlite3ErrorMsg(pName->pParse, "invalid name: \"%T\"", &pExpr->span); + return SQLITE_ERROR; + } + }else{ + pExpr->op = TK_STRING; + } + } + return rc; +} + +/* +** An SQL user-function registered to do the work of an ATTACH statement. The +** three arguments to the function come directly from an attach statement: +** +** ATTACH DATABASE x AS y KEY z +** +** SELECT sqlite_attach(x, y, z) +** +** If the optional "KEY z" syntax is omitted, an SQL NULL is passed as the +** third argument. +*/ +static void attachFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + int i; + int rc = 0; + sqlite3 *db = sqlite3_user_data(context); + const char *zName; + const char *zFile; + Db *aNew; + char *zErrDyn = 0; + char zErr[128]; + + zFile = (const char *)sqlite3_value_text(argv[0]); + zName = (const char *)sqlite3_value_text(argv[1]); + if( zFile==0 ) zFile = ""; + if( zName==0 ) zName = ""; + + /* Check for the following errors: + ** + ** * Too many attached databases, + ** * Transaction currently open + ** * Specified database name already being used. + */ + if( db->nDb>=SQLITE_MAX_ATTACHED+2 ){ + sqlite3_snprintf( + sizeof(zErr), zErr, "too many attached databases - max %d", + SQLITE_MAX_ATTACHED + ); + goto attach_error; + } + if( !db->autoCommit ){ + sqlite3_snprintf(sizeof(zErr), zErr, + "cannot ATTACH database within transaction"); + goto attach_error; + } + for(i=0; inDb; i++){ + char *z = db->aDb[i].zName; + if( z && zName && sqlite3StrICmp(z, zName)==0 ){ + sqlite3_snprintf(sizeof(zErr), zErr, + "database %s is already in use", zName); + goto attach_error; + } + } + + /* Allocate the new entry in the db->aDb[] array and initialise the schema + ** hash tables. + */ + if( db->aDb==db->aDbStatic ){ + aNew = sqlite3_malloc( sizeof(db->aDb[0])*3 ); + if( aNew==0 ){ + db->mallocFailed = 1; + return; + } + memcpy(aNew, db->aDb, sizeof(db->aDb[0])*2); + }else{ + aNew = sqlite3_realloc(db->aDb, sizeof(db->aDb[0])*(db->nDb+1) ); + if( aNew==0 ){ + db->mallocFailed = 1; + return; + } + } + db->aDb = aNew; + aNew = &db->aDb[db->nDb++]; + memset(aNew, 0, sizeof(*aNew)); + + /* Open the database file. If the btree is successfully opened, use + ** it to obtain the database schema. At this point the schema may + ** or may not be initialised. 
+ */ + rc = sqlite3BtreeFactory(db, zFile, 0, SQLITE_DEFAULT_CACHE_SIZE, + db->openFlags | SQLITE_OPEN_MAIN_DB, + &aNew->pBt); + if( rc==SQLITE_OK ){ + aNew->pSchema = sqlite3SchemaGet(db, aNew->pBt); + if( !aNew->pSchema ){ + rc = SQLITE_NOMEM; + }else if( aNew->pSchema->file_format && aNew->pSchema->enc!=ENC(db) ){ + sqlite3_snprintf(sizeof(zErr), zErr, + "attached databases must use the same text encoding as main database"); + goto attach_error; + } + sqlite3PagerLockingMode(sqlite3BtreePager(aNew->pBt), db->dfltLockMode); + } + aNew->zName = sqlite3DbStrDup(db, zName); + aNew->safety_level = 3; + +#if SQLITE_HAS_CODEC + { + extern int sqlite3CodecAttach(sqlite3*, int, const void*, int); + extern void sqlite3CodecGetKey(sqlite3*, int, void**, int*); + int nKey; + char *zKey; + int t = sqlite3_value_type(argv[2]); + switch( t ){ + case SQLITE_INTEGER: + case SQLITE_FLOAT: + zErrDyn = sqlite3DbStrDup(db, "Invalid key value"); + rc = SQLITE_ERROR; + break; + + case SQLITE_TEXT: + case SQLITE_BLOB: + nKey = sqlite3_value_bytes(argv[2]); + zKey = (char *)sqlite3_value_blob(argv[2]); + sqlite3CodecAttach(db, db->nDb-1, zKey, nKey); + break; + + case SQLITE_NULL: + /* No key specified. Use the key from the main database */ + sqlite3CodecGetKey(db, 0, (void**)&zKey, &nKey); + sqlite3CodecAttach(db, db->nDb-1, zKey, nKey); + break; + } + } +#endif + + /* If the file was opened successfully, read the schema for the new database. + ** If this fails, or if opening the file failed, then close the file and + ** remove the entry from the db->aDb[] array. i.e. put everything back the way + ** we found it. + */ + if( rc==SQLITE_OK ){ + sqlite3SafetyOn(db); + rc = sqlite3Init(db, &zErrDyn); + sqlite3SafetyOff(db); + } + if( rc ){ + int iDb = db->nDb - 1; + assert( iDb>=2 ); + if( db->aDb[iDb].pBt ){ + sqlite3BtreeClose(db->aDb[iDb].pBt); + db->aDb[iDb].pBt = 0; + db->aDb[iDb].pSchema = 0; + } + sqlite3ResetInternalSchema(db, 0); + db->nDb = iDb; + if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ){ + db->mallocFailed = 1; + sqlite3_snprintf(sizeof(zErr),zErr, "out of memory"); + }else{ + sqlite3_snprintf(sizeof(zErr),zErr, "unable to open database: %s", zFile); + } + goto attach_error; + } + + return; + +attach_error: + /* Return an error if we get here */ + if( zErrDyn ){ + sqlite3_result_error(context, zErrDyn, -1); + sqlite3_free(zErrDyn); + }else{ + zErr[sizeof(zErr)-1] = 0; + sqlite3_result_error(context, zErr, -1); + } +} + +/* +** An SQL user-function registered to do the work of an DETACH statement. 
The +** three arguments to the function come directly from a detach statement: +** +** DETACH DATABASE x +** +** SELECT sqlite_detach(x) +*/ +static void detachFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + const char *zName = (const char *)sqlite3_value_text(argv[0]); + sqlite3 *db = sqlite3_user_data(context); + int i; + Db *pDb = 0; + char zErr[128]; + + if( zName==0 ) zName = ""; + for(i=0; inDb; i++){ + pDb = &db->aDb[i]; + if( pDb->pBt==0 ) continue; + if( sqlite3StrICmp(pDb->zName, zName)==0 ) break; + } + + if( i>=db->nDb ){ + sqlite3_snprintf(sizeof(zErr),zErr, "no such database: %s", zName); + goto detach_error; + } + if( i<2 ){ + sqlite3_snprintf(sizeof(zErr),zErr, "cannot detach database %s", zName); + goto detach_error; + } + if( !db->autoCommit ){ + sqlite3_snprintf(sizeof(zErr), zErr, + "cannot DETACH database within transaction"); + goto detach_error; + } + if( sqlite3BtreeIsInReadTrans(pDb->pBt) ){ + sqlite3_snprintf(sizeof(zErr),zErr, "database %s is locked", zName); + goto detach_error; + } + + sqlite3BtreeClose(pDb->pBt); + pDb->pBt = 0; + pDb->pSchema = 0; + sqlite3ResetInternalSchema(db, 0); + return; + +detach_error: + sqlite3_result_error(context, zErr, -1); +} + +/* +** This procedure generates VDBE code for a single invocation of either the +** sqlite_detach() or sqlite_attach() SQL user functions. +*/ +static void codeAttach( + Parse *pParse, /* The parser context */ + int type, /* Either SQLITE_ATTACH or SQLITE_DETACH */ + const char *zFunc, /* Either "sqlite_attach" or "sqlite_detach */ + int nFunc, /* Number of args to pass to zFunc */ + Expr *pAuthArg, /* Expression to pass to authorization callback */ + Expr *pFilename, /* Name of database file */ + Expr *pDbname, /* Name of the database to use internally */ + Expr *pKey /* Database key for encryption extension */ +){ + int rc; + NameContext sName; + Vdbe *v; + FuncDef *pFunc; + sqlite3* db = pParse->db; + +#ifndef SQLITE_OMIT_AUTHORIZATION + assert( db->mallocFailed || pAuthArg ); + if( pAuthArg ){ + char *zAuthArg = sqlite3NameFromToken(db, &pAuthArg->span); + if( !zAuthArg ){ + goto attach_end; + } + rc = sqlite3AuthCheck(pParse, type, zAuthArg, 0, 0); + sqlite3_free(zAuthArg); + if(rc!=SQLITE_OK ){ + goto attach_end; + } + } +#endif /* SQLITE_OMIT_AUTHORIZATION */ + + memset(&sName, 0, sizeof(NameContext)); + sName.pParse = pParse; + + if( + SQLITE_OK!=(rc = resolveAttachExpr(&sName, pFilename)) || + SQLITE_OK!=(rc = resolveAttachExpr(&sName, pDbname)) || + SQLITE_OK!=(rc = resolveAttachExpr(&sName, pKey)) + ){ + pParse->nErr++; + goto attach_end; + } + + v = sqlite3GetVdbe(pParse); + sqlite3ExprCode(pParse, pFilename); + sqlite3ExprCode(pParse, pDbname); + sqlite3ExprCode(pParse, pKey); + + assert( v || db->mallocFailed ); + if( v ){ + sqlite3VdbeAddOp(v, OP_Function, 0, nFunc); + pFunc = sqlite3FindFunction(db, zFunc, strlen(zFunc), nFunc, SQLITE_UTF8,0); + sqlite3VdbeChangeP3(v, -1, (char *)pFunc, P3_FUNCDEF); + + /* Code an OP_Expire. For an ATTACH statement, set P1 to true (expire this + ** statement only). For DETACH, set it to false (expire all existing + ** statements). + */ + sqlite3VdbeAddOp(v, OP_Expire, (type==SQLITE_ATTACH), 0); + } + +attach_end: + sqlite3ExprDelete(pFilename); + sqlite3ExprDelete(pDbname); + sqlite3ExprDelete(pKey); +} + +/* +** Called by the parser to compile a DETACH statement. 
+** +** DETACH pDbname +*/ +void sqlite3Detach(Parse *pParse, Expr *pDbname){ + codeAttach(pParse, SQLITE_DETACH, "sqlite_detach", 1, pDbname, 0, 0, pDbname); +} + +/* +** Called by the parser to compile an ATTACH statement. +** +** ATTACH p AS pDbname KEY pKey +*/ +void sqlite3Attach(Parse *pParse, Expr *p, Expr *pDbname, Expr *pKey){ + codeAttach(pParse, SQLITE_ATTACH, "sqlite_attach", 3, p, p, pDbname, pKey); +} +#endif /* SQLITE_OMIT_ATTACH */ + +/* +** Register the functions sqlite_attach and sqlite_detach. +*/ +void sqlite3AttachFunctions(sqlite3 *db){ +#ifndef SQLITE_OMIT_ATTACH + static const int enc = SQLITE_UTF8; + sqlite3CreateFunc(db, "sqlite_attach", 3, enc, db, attachFunc, 0, 0); + sqlite3CreateFunc(db, "sqlite_detach", 1, enc, db, detachFunc, 0, 0); +#endif +} + +/* +** Initialize a DbFixer structure. This routine must be called prior +** to passing the structure to one of the sqliteFixAAAA() routines below. +** +** The return value indicates whether or not fixation is required. TRUE +** means we do need to fix the database references, FALSE means we do not. +*/ +int sqlite3FixInit( + DbFixer *pFix, /* The fixer to be initialized */ + Parse *pParse, /* Error messages will be written here */ + int iDb, /* This is the database that must be used */ + const char *zType, /* "view", "trigger", or "index" */ + const Token *pName /* Name of the view, trigger, or index */ +){ + sqlite3 *db; + + if( iDb<0 || iDb==1 ) return 0; + db = pParse->db; + assert( db->nDb>iDb ); + pFix->pParse = pParse; + pFix->zDb = db->aDb[iDb].zName; + pFix->zType = zType; + pFix->pName = pName; + return 1; +} + +/* +** The following set of routines walk through the parse tree and assign +** a specific database to all table references where the database name +** was left unspecified in the original SQL statement. The pFix structure +** must have been initialized by a prior call to sqlite3FixInit(). +** +** These routines are used to make sure that an index, trigger, or +** view in one database does not refer to objects in a different database. +** (Exception: indices, triggers, and views in the TEMP database are +** allowed to refer to anything.) If a reference is explicitly made +** to an object in a different database, an error message is added to +** pParse->zErrMsg and these routines return non-zero. If everything +** checks out, these routines return 0. 
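**
** For example (an illustration added here, not part of the original
** comment), with a second database attached as "aux" the statement
**
**     CREATE VIEW aux.v1 AS SELECT * FROM main.t1;
**
** is rejected by these routines with an error of the form
** "view v1 cannot reference objects in database main", while the same
** view created in the TEMP database is accepted.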
+*/ +int sqlite3FixSrcList( + DbFixer *pFix, /* Context of the fixation */ + SrcList *pList /* The Source list to check and modify */ +){ + int i; + const char *zDb; + struct SrcList_item *pItem; + + if( pList==0 ) return 0; + zDb = pFix->zDb; + for(i=0, pItem=pList->a; inSrc; i++, pItem++){ + if( pItem->zDatabase==0 ){ + pItem->zDatabase = sqlite3DbStrDup(pFix->pParse->db, zDb); + }else if( sqlite3StrICmp(pItem->zDatabase,zDb)!=0 ){ + sqlite3ErrorMsg(pFix->pParse, + "%s %T cannot reference objects in database %s", + pFix->zType, pFix->pName, pItem->zDatabase); + return 1; + } +#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER) + if( sqlite3FixSelect(pFix, pItem->pSelect) ) return 1; + if( sqlite3FixExpr(pFix, pItem->pOn) ) return 1; +#endif + } + return 0; +} +#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER) +int sqlite3FixSelect( + DbFixer *pFix, /* Context of the fixation */ + Select *pSelect /* The SELECT statement to be fixed to one database */ +){ + while( pSelect ){ + if( sqlite3FixExprList(pFix, pSelect->pEList) ){ + return 1; + } + if( sqlite3FixSrcList(pFix, pSelect->pSrc) ){ + return 1; + } + if( sqlite3FixExpr(pFix, pSelect->pWhere) ){ + return 1; + } + if( sqlite3FixExpr(pFix, pSelect->pHaving) ){ + return 1; + } + pSelect = pSelect->pPrior; + } + return 0; +} +int sqlite3FixExpr( + DbFixer *pFix, /* Context of the fixation */ + Expr *pExpr /* The expression to be fixed to one database */ +){ + while( pExpr ){ + if( sqlite3FixSelect(pFix, pExpr->pSelect) ){ + return 1; + } + if( sqlite3FixExprList(pFix, pExpr->pList) ){ + return 1; + } + if( sqlite3FixExpr(pFix, pExpr->pRight) ){ + return 1; + } + pExpr = pExpr->pLeft; + } + return 0; +} +int sqlite3FixExprList( + DbFixer *pFix, /* Context of the fixation */ + ExprList *pList /* The expression to be fixed to one database */ +){ + int i; + struct ExprList_item *pItem; + if( pList==0 ) return 0; + for(i=0, pItem=pList->a; inExpr; i++, pItem++){ + if( sqlite3FixExpr(pFix, pItem->pExpr) ){ + return 1; + } + } + return 0; +} +#endif + +#ifndef SQLITE_OMIT_TRIGGER +int sqlite3FixTriggerStep( + DbFixer *pFix, /* Context of the fixation */ + TriggerStep *pStep /* The trigger step be fixed to one database */ +){ + while( pStep ){ + if( sqlite3FixSelect(pFix, pStep->pSelect) ){ + return 1; + } + if( sqlite3FixExpr(pFix, pStep->pWhere) ){ + return 1; + } + if( sqlite3FixExprList(pFix, pStep->pExprList) ){ + return 1; + } + pStep = pStep->pNext; + } + return 0; +} +#endif diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/auth.c b/libraries/sqlite/unix/sqlite-3.5.1/src/auth.c new file mode 100644 index 0000000000..5630c239f5 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/auth.c @@ -0,0 +1,234 @@ +/* +** 2003 January 11 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains code used to implement the sqlite3_set_authorizer() +** API. This facility is an optional feature of the library. Embedded +** systems that do not need this facility may omit it by recompiling +** the library with -DSQLITE_OMIT_AUTHORIZATION=1 +** +** $Id: auth.c,v 1.29 2007/09/18 15:55:07 drh Exp $ +*/ +#include "sqliteInt.h" + +/* +** All of the code in this file may be omitted by defining a single +** macro. 
+*/ +#ifndef SQLITE_OMIT_AUTHORIZATION + +/* +** Set or clear the access authorization function. +** +** The access authorization function is be called during the compilation +** phase to verify that the user has read and/or write access permission on +** various fields of the database. The first argument to the auth function +** is a copy of the 3rd argument to this routine. The second argument +** to the auth function is one of these constants: +** +** SQLITE_CREATE_INDEX +** SQLITE_CREATE_TABLE +** SQLITE_CREATE_TEMP_INDEX +** SQLITE_CREATE_TEMP_TABLE +** SQLITE_CREATE_TEMP_TRIGGER +** SQLITE_CREATE_TEMP_VIEW +** SQLITE_CREATE_TRIGGER +** SQLITE_CREATE_VIEW +** SQLITE_DELETE +** SQLITE_DROP_INDEX +** SQLITE_DROP_TABLE +** SQLITE_DROP_TEMP_INDEX +** SQLITE_DROP_TEMP_TABLE +** SQLITE_DROP_TEMP_TRIGGER +** SQLITE_DROP_TEMP_VIEW +** SQLITE_DROP_TRIGGER +** SQLITE_DROP_VIEW +** SQLITE_INSERT +** SQLITE_PRAGMA +** SQLITE_READ +** SQLITE_SELECT +** SQLITE_TRANSACTION +** SQLITE_UPDATE +** +** The third and fourth arguments to the auth function are the name of +** the table and the column that are being accessed. The auth function +** should return either SQLITE_OK, SQLITE_DENY, or SQLITE_IGNORE. If +** SQLITE_OK is returned, it means that access is allowed. SQLITE_DENY +** means that the SQL statement will never-run - the sqlite3_exec() call +** will return with an error. SQLITE_IGNORE means that the SQL statement +** should run but attempts to read the specified column will return NULL +** and attempts to write the column will be ignored. +** +** Setting the auth function to NULL disables this hook. The default +** setting of the auth function is NULL. +*/ +int sqlite3_set_authorizer( + sqlite3 *db, + int (*xAuth)(void*,int,const char*,const char*,const char*,const char*), + void *pArg +){ + sqlite3_mutex_enter(db->mutex); + db->xAuth = xAuth; + db->pAuthArg = pArg; + sqlite3ExpirePreparedStatements(db); + sqlite3_mutex_leave(db->mutex); + return SQLITE_OK; +} + +/* +** Write an error message into pParse->zErrMsg that explains that the +** user-supplied authorization function returned an illegal value. +*/ +static void sqliteAuthBadReturnCode(Parse *pParse, int rc){ + sqlite3ErrorMsg(pParse, "illegal return value (%d) from the " + "authorization function - should be SQLITE_OK, SQLITE_IGNORE, " + "or SQLITE_DENY", rc); + pParse->rc = SQLITE_ERROR; +} + +/* +** The pExpr should be a TK_COLUMN expression. The table referred to +** is in pTabList or else it is the NEW or OLD table of a trigger. +** Check to see if it is OK to read this particular column. +** +** If the auth function returns SQLITE_IGNORE, change the TK_COLUMN +** instruction into a TK_NULL. If the auth function returns SQLITE_DENY, +** then generate an error. 
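**
** A minimal application-level sketch follows (it is not part of this file,
** and the table, column and callback names are hypothetical). The callback
** hides the "salary" column of table "emp" by returning SQLITE_IGNORE for
** reads of that column and allows everything else.
*/
static int xHideSalary(
  void *pNotUsed,       /* Copy of the 3rd argument to sqlite3_set_authorizer() */
  int code,             /* Action code, e.g. SQLITE_READ */
  const char *zArg1,    /* For SQLITE_READ: the table name */
  const char *zArg2,    /* For SQLITE_READ: the column name */
  const char *zArg3,    /* Name of the database ("main", "temp", ...) */
  const char *zArg4     /* Inner-most trigger or view, or NULL */
){
  if( code==SQLITE_READ && zArg1 && zArg2
   && strcmp(zArg1, "emp")==0 && strcmp(zArg2, "salary")==0 ){
    return SQLITE_IGNORE;   /* emp.salary reads come back as NULL */
  }
  return SQLITE_OK;         /* Allow everything else */
}
/*
** The callback above would be installed with
**
**     sqlite3_set_authorizer(db, xHideSalary, 0);
**
** sqlite3AuthRead() below is the routine that turns SQLITE_IGNORE into a
** NULL column value and SQLITE_DENY into an error for column reads.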
+*/ +void sqlite3AuthRead( + Parse *pParse, /* The parser context */ + Expr *pExpr, /* The expression to check authorization on */ + Schema *pSchema, /* The schema of the expression */ + SrcList *pTabList /* All table that pExpr might refer to */ +){ + sqlite3 *db = pParse->db; + int rc; + Table *pTab = 0; /* The table being read */ + const char *zCol; /* Name of the column of the table */ + int iSrc; /* Index in pTabList->a[] of table being read */ + const char *zDBase; /* Name of database being accessed */ + TriggerStack *pStack; /* The stack of current triggers */ + int iDb; /* The index of the database the expression refers to */ + + if( db->xAuth==0 ) return; + if( pExpr->op!=TK_COLUMN ) return; + iDb = sqlite3SchemaToIndex(pParse->db, pSchema); + if( iDb<0 ){ + /* An attempt to read a column out of a subquery or other + ** temporary table. */ + return; + } + for(iSrc=0; pTabList && iSrcnSrc; iSrc++){ + if( pExpr->iTable==pTabList->a[iSrc].iCursor ) break; + } + if( iSrc>=0 && pTabList && iSrcnSrc ){ + pTab = pTabList->a[iSrc].pTab; + }else if( (pStack = pParse->trigStack)!=0 ){ + /* This must be an attempt to read the NEW or OLD pseudo-tables + ** of a trigger. + */ + assert( pExpr->iTable==pStack->newIdx || pExpr->iTable==pStack->oldIdx ); + pTab = pStack->pTab; + } + if( pTab==0 ) return; + if( pExpr->iColumn>=0 ){ + assert( pExpr->iColumnnCol ); + zCol = pTab->aCol[pExpr->iColumn].zName; + }else if( pTab->iPKey>=0 ){ + assert( pTab->iPKeynCol ); + zCol = pTab->aCol[pTab->iPKey].zName; + }else{ + zCol = "ROWID"; + } + assert( iDb>=0 && iDbnDb ); + zDBase = db->aDb[iDb].zName; + rc = db->xAuth(db->pAuthArg, SQLITE_READ, pTab->zName, zCol, zDBase, + pParse->zAuthContext); + if( rc==SQLITE_IGNORE ){ + pExpr->op = TK_NULL; + }else if( rc==SQLITE_DENY ){ + if( db->nDb>2 || iDb!=0 ){ + sqlite3ErrorMsg(pParse, "access to %s.%s.%s is prohibited", + zDBase, pTab->zName, zCol); + }else{ + sqlite3ErrorMsg(pParse, "access to %s.%s is prohibited",pTab->zName,zCol); + } + pParse->rc = SQLITE_AUTH; + }else if( rc!=SQLITE_OK ){ + sqliteAuthBadReturnCode(pParse, rc); + } +} + +/* +** Do an authorization check using the code and arguments given. Return +** either SQLITE_OK (zero) or SQLITE_IGNORE or SQLITE_DENY. If SQLITE_DENY +** is returned, then the error count and error message in pParse are +** modified appropriately. +*/ +int sqlite3AuthCheck( + Parse *pParse, + int code, + const char *zArg1, + const char *zArg2, + const char *zArg3 +){ + sqlite3 *db = pParse->db; + int rc; + + /* Don't do any authorization checks if the database is initialising + ** or if the parser is being invoked from within sqlite3_declare_vtab. + */ + if( db->init.busy || IN_DECLARE_VTAB ){ + return SQLITE_OK; + } + + if( db->xAuth==0 ){ + return SQLITE_OK; + } + rc = db->xAuth(db->pAuthArg, code, zArg1, zArg2, zArg3, pParse->zAuthContext); + if( rc==SQLITE_DENY ){ + sqlite3ErrorMsg(pParse, "not authorized"); + pParse->rc = SQLITE_AUTH; + }else if( rc!=SQLITE_OK && rc!=SQLITE_IGNORE ){ + rc = SQLITE_DENY; + sqliteAuthBadReturnCode(pParse, rc); + } + return rc; +} + +/* +** Push an authorization context. After this routine is called, the +** zArg3 argument to authorization callbacks will be zContext until +** popped. Or if pParse==0, this routine is a no-op. 
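**
** Typical internal usage (shown here only as an illustration) brackets a
** region of code generation on behalf of a particular object:
**
**     AuthContext sContext;
**     sqlite3AuthContextPush(pParse, &sContext, pTab->zName);
**     ... generate code that triggers authorizer callbacks ...
**     sqlite3AuthContextPop(&sContext);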
+*/ +void sqlite3AuthContextPush( + Parse *pParse, + AuthContext *pContext, + const char *zContext +){ + pContext->pParse = pParse; + if( pParse ){ + pContext->zAuthContext = pParse->zAuthContext; + pParse->zAuthContext = zContext; + } +} + +/* +** Pop an authorization context that was previously pushed +** by sqlite3AuthContextPush +*/ +void sqlite3AuthContextPop(AuthContext *pContext){ + if( pContext->pParse ){ + pContext->pParse->zAuthContext = pContext->zAuthContext; + pContext->pParse = 0; + } +} + +#endif /* SQLITE_OMIT_AUTHORIZATION */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/btmutex.c b/libraries/sqlite/unix/sqlite-3.5.1/src/btmutex.c new file mode 100644 index 0000000000..1f63434231 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/btmutex.c @@ -0,0 +1,315 @@ +/* +** 2007 August 27 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** $Id: btmutex.c,v 1.7 2007/08/30 01:19:59 drh Exp $ +** +** This file contains code used to implement mutexes on Btree objects. +** This code really belongs in btree.c. But btree.c is getting too +** big and we want to break it down some. This packaged seemed like +** a good breakout. +*/ +#include "btreeInt.h" +#if SQLITE_THREADSAFE && !defined(SQLITE_OMIT_SHARED_CACHE) + + +/* +** Enter a mutex on the given BTree object. +** +** If the object is not sharable, then no mutex is ever required +** and this routine is a no-op. The underlying mutex is non-recursive. +** But we keep a reference count in Btree.wantToLock so the behavior +** of this interface is recursive. +** +** To avoid deadlocks, multiple Btrees are locked in the same order +** by all database connections. The p->pNext is a list of other +** Btrees belonging to the same database connection as the p Btree +** which need to be locked after p. If we cannot get a lock on +** p, then first unlock all of the others on p->pNext, then wait +** for the lock to become available on p, then relock all of the +** subsequent Btrees that desire a lock. +*/ +void sqlite3BtreeEnter(Btree *p){ + Btree *pLater; + + /* Some basic sanity checking on the Btree. The list of Btrees + ** connected by pNext and pPrev should be in sorted order by + ** Btree.pBt value. All elements of the list should belong to + ** the same connection. Only shared Btrees are on the list. */ + assert( p->pNext==0 || p->pNext->pBt>p->pBt ); + assert( p->pPrev==0 || p->pPrev->pBtpBt ); + assert( p->pNext==0 || p->pNext->pSqlite==p->pSqlite ); + assert( p->pPrev==0 || p->pPrev->pSqlite==p->pSqlite ); + assert( p->sharable || (p->pNext==0 && p->pPrev==0) ); + + /* Check for locking consistency */ + assert( !p->locked || p->wantToLock>0 ); + assert( p->sharable || p->wantToLock==0 ); + + /* We should already hold a lock on the database connection */ + assert( sqlite3_mutex_held(p->pSqlite->mutex) ); + + if( !p->sharable ) return; + p->wantToLock++; + if( p->locked ) return; + + /* In most cases, we should be able to acquire the lock we + ** want without having to go throught the ascending lock + ** procedure that follows. Just be sure not to block. 
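**
** To make the ordering rule concrete (example added for clarity): suppose
** this connection shares Btrees B1, B2 and B3, listed in ascending order
** of their BtShared addresses, that it already holds the mutex on B3, and
** that it now wants the mutex on B2. It must release B3, take B2, then
** re-take B3, so that mutexes are only ever acquired in ascending order
** and two connections sharing the same btrees cannot deadlock.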
+ */ + if( sqlite3_mutex_try(p->pBt->mutex)==SQLITE_OK ){ + p->locked = 1; + return; + } + + /* To avoid deadlock, first release all locks with a larger + ** BtShared address. Then acquire our lock. Then reacquire + ** the other BtShared locks that we used to hold in ascending + ** order. + */ + for(pLater=p->pNext; pLater; pLater=pLater->pNext){ + assert( pLater->sharable ); + assert( pLater->pNext==0 || pLater->pNext->pBt>pLater->pBt ); + assert( !pLater->locked || pLater->wantToLock>0 ); + if( pLater->locked ){ + sqlite3_mutex_leave(pLater->pBt->mutex); + pLater->locked = 0; + } + } + sqlite3_mutex_enter(p->pBt->mutex); + p->locked = 1; + for(pLater=p->pNext; pLater; pLater=pLater->pNext){ + if( pLater->wantToLock ){ + sqlite3_mutex_enter(pLater->pBt->mutex); + pLater->locked = 1; + } + } +} + +/* +** Exit the recursive mutex on a Btree. +*/ +void sqlite3BtreeLeave(Btree *p){ + if( p->sharable ){ + assert( p->wantToLock>0 ); + p->wantToLock--; + if( p->wantToLock==0 ){ + assert( p->locked ); + sqlite3_mutex_leave(p->pBt->mutex); + p->locked = 0; + } + } +} + +#ifndef NDEBUG +/* +** Return true if the BtShared mutex is held on the btree. +** +** This routine makes no determination one why or another if the +** database connection mutex is held. +** +** This routine is used only from within assert() statements. +*/ +int sqlite3BtreeHoldsMutex(Btree *p){ + return (p->sharable==0 || + (p->locked && p->wantToLock && sqlite3_mutex_held(p->pBt->mutex))); +} +#endif + + +#ifndef SQLITE_OMIT_INCRBLOB +/* +** Enter and leave a mutex on a Btree given a cursor owned by that +** Btree. These entry points are used by incremental I/O and can be +** omitted if that module is not used. +*/ +void sqlite3BtreeEnterCursor(BtCursor *pCur){ + sqlite3BtreeEnter(pCur->pBtree); +} +void sqlite3BtreeLeaveCursor(BtCursor *pCur){ + sqlite3BtreeLeave(pCur->pBtree); +} +#endif /* SQLITE_OMIT_INCRBLOB */ + + +/* +** Enter the mutex on every Btree associated with a database +** connection. This is needed (for example) prior to parsing +** a statement since we will be comparing table and column names +** against all schemas and we do not want those schemas being +** reset out from under us. +** +** There is a corresponding leave-all procedures. +** +** Enter the mutexes in accending order by BtShared pointer address +** to avoid the possibility of deadlock when two threads with +** two or more btrees in common both try to lock all their btrees +** at the same instant. +*/ +void sqlite3BtreeEnterAll(sqlite3 *db){ + int i; + Btree *p, *pLater; + assert( sqlite3_mutex_held(db->mutex) ); + for(i=0; inDb; i++){ + p = db->aDb[i].pBt; + if( p && p->sharable ){ + p->wantToLock++; + if( !p->locked ){ + assert( p->wantToLock==1 ); + while( p->pPrev ) p = p->pPrev; + while( p->locked && p->pNext ) p = p->pNext; + for(pLater = p->pNext; pLater; pLater=pLater->pNext){ + if( pLater->locked ){ + sqlite3_mutex_leave(pLater->pBt->mutex); + pLater->locked = 0; + } + } + while( p ){ + sqlite3_mutex_enter(p->pBt->mutex); + p->locked++; + p = p->pNext; + } + } + } + } +} +void sqlite3BtreeLeaveAll(sqlite3 *db){ + int i; + Btree *p; + assert( sqlite3_mutex_held(db->mutex) ); + for(i=0; inDb; i++){ + p = db->aDb[i].pBt; + if( p && p->sharable ){ + assert( p->wantToLock>0 ); + p->wantToLock--; + if( p->wantToLock==0 ){ + assert( p->locked ); + sqlite3_mutex_leave(p->pBt->mutex); + p->locked = 0; + } + } + } +} + +#ifndef NDEBUG +/* +** Return true if the current thread holds the database connection +** mutex and all required BtShared mutexes. 
+** +** This routine is used inside assert() statements only. +*/ +int sqlite3BtreeHoldsAllMutexes(sqlite3 *db){ + int i; + if( !sqlite3_mutex_held(db->mutex) ){ + return 0; + } + for(i=0; inDb; i++){ + Btree *p; + p = db->aDb[i].pBt; + if( p && p->sharable && + (p->wantToLock==0 || !sqlite3_mutex_held(p->pBt->mutex)) ){ + return 0; + } + } + return 1; +} +#endif /* NDEBUG */ + +/* +** Potentially dd a new Btree pointer to a BtreeMutexArray. +** Really only add the Btree if it can possibly be shared with +** another database connection. +** +** The Btrees are kept in sorted order by pBtree->pBt. That +** way when we go to enter all the mutexes, we can enter them +** in order without every having to backup and retry and without +** worrying about deadlock. +** +** The number of shared btrees will always be small (usually 0 or 1) +** so an insertion sort is an adequate algorithm here. +*/ +void sqlite3BtreeMutexArrayInsert(BtreeMutexArray *pArray, Btree *pBtree){ + int i, j; + BtShared *pBt; + if( pBtree==0 || pBtree->sharable==0 ) return; +#ifndef NDEBUG + { + for(i=0; inMutex; i++){ + assert( pArray->aBtree[i]!=pBtree ); + } + } +#endif + assert( pArray->nMutex>=0 ); + assert( pArray->nMutexaBtree)/sizeof(pArray->aBtree[0])-1 ); + pBt = pBtree->pBt; + for(i=0; inMutex; i++){ + assert( pArray->aBtree[i]!=pBtree ); + if( pArray->aBtree[i]->pBt>pBt ){ + for(j=pArray->nMutex; j>i; j--){ + pArray->aBtree[j] = pArray->aBtree[j-1]; + } + pArray->aBtree[i] = pBtree; + pArray->nMutex++; + return; + } + } + pArray->aBtree[pArray->nMutex++] = pBtree; +} + +/* +** Enter the mutex of every btree in the array. This routine is +** called at the beginning of sqlite3VdbeExec(). The mutexes are +** exited at the end of the same function. +*/ +void sqlite3BtreeMutexArrayEnter(BtreeMutexArray *pArray){ + int i; + for(i=0; inMutex; i++){ + Btree *p = pArray->aBtree[i]; + /* Some basic sanity checking */ + assert( i==0 || pArray->aBtree[i-1]->pBtpBt ); + assert( !p->locked || p->wantToLock>0 ); + + /* We should already hold a lock on the database connection */ + assert( sqlite3_mutex_held(p->pSqlite->mutex) ); + + p->wantToLock++; + if( !p->locked && p->sharable ){ + sqlite3_mutex_enter(p->pBt->mutex); + p->locked = 1; + } + } +} + +/* +** Leave the mutex of every btree in the group. +*/ +void sqlite3BtreeMutexArrayLeave(BtreeMutexArray *pArray){ + int i; + for(i=0; inMutex; i++){ + Btree *p = pArray->aBtree[i]; + /* Some basic sanity checking */ + assert( i==0 || pArray->aBtree[i-1]->pBtpBt ); + assert( p->locked || !p->sharable ); + assert( p->wantToLock>0 ); + + /* We should already hold a lock on the database connection */ + assert( sqlite3_mutex_held(p->pSqlite->mutex) ); + + p->wantToLock--; + if( p->wantToLock==0 && p->locked ){ + sqlite3_mutex_leave(p->pBt->mutex); + p->locked = 0; + } + } +} + + +#endif /* SQLITE_THREADSAFE && !SQLITE_OMIT_SHARED_CACHE */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/btree.c b/libraries/sqlite/unix/sqlite-3.5.1/src/btree.c new file mode 100644 index 0000000000..de8821a900 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/btree.c @@ -0,0 +1,6890 @@ +/* +** 2004 April 6 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. 
+** +************************************************************************* +** $Id: btree.c,v 1.428 2007/10/03 08:46:44 danielk1977 Exp $ +** +** This file implements a external (disk-based) database using BTrees. +** See the header comment on "btreeInt.h" for additional information. +** Including a description of file format and an overview of operation. +*/ +#include "btreeInt.h" + +/* +** The header string that appears at the beginning of every +** SQLite database. +*/ +static const char zMagicHeader[] = SQLITE_FILE_HEADER; + +/* +** Set this global variable to 1 to enable tracing using the TRACE +** macro. +*/ +#if SQLITE_TEST +int sqlite3_btree_trace=0; /* True to enable tracing */ +#endif + + + +#ifndef SQLITE_OMIT_SHARED_CACHE +/* +** A flag to indicate whether or not shared cache is enabled. Also, +** a list of BtShared objects that are eligible for participation +** in shared cache. The variables have file scope during normal builds, +** but the test harness needs to access these variables so we make them +** global for test builds. +*/ +#ifdef SQLITE_TEST +BtShared *sqlite3SharedCacheList = 0; +int sqlite3SharedCacheEnabled = 0; +#else +static BtShared *sqlite3SharedCacheList = 0; +static int sqlite3SharedCacheEnabled = 0; +#endif +#endif /* SQLITE_OMIT_SHARED_CACHE */ + +#ifndef SQLITE_OMIT_SHARED_CACHE +/* +** Enable or disable the shared pager and schema features. +** +** This routine has no effect on existing database connections. +** The shared cache setting effects only future calls to +** sqlite3_open(), sqlite3_open16(), or sqlite3_open_v2(). +*/ +int sqlite3_enable_shared_cache(int enable){ + sqlite3SharedCacheEnabled = enable; + return SQLITE_OK; +} +#endif + + +/* +** Forward declaration +*/ +static int checkReadLocks(Btree*,Pgno,BtCursor*); + + +#ifdef SQLITE_OMIT_SHARED_CACHE + /* + ** The functions queryTableLock(), lockTable() and unlockAllTables() + ** manipulate entries in the BtShared.pLock linked list used to store + ** shared-cache table level locks. If the library is compiled with the + ** shared-cache feature disabled, then there is only ever one user + ** of each BtShared structure and so this locking is not necessary. + ** So define the lock related functions as no-ops. + */ + #define queryTableLock(a,b,c) SQLITE_OK + #define lockTable(a,b,c) SQLITE_OK + #define unlockAllTables(a) +#endif + +#ifndef SQLITE_OMIT_SHARED_CACHE +/* +** Query to see if btree handle p may obtain a lock of type eLock +** (READ_LOCK or WRITE_LOCK) on the table with root-page iTab. Return +** SQLITE_OK if the lock may be obtained (by calling lockTable()), or +** SQLITE_LOCKED if not. +*/ +static int queryTableLock(Btree *p, Pgno iTab, u8 eLock){ + BtShared *pBt = p->pBt; + BtLock *pIter; + + assert( sqlite3BtreeHoldsMutex(p) ); + + /* This is a no-op if the shared-cache is not enabled */ + if( !p->sharable ){ + return SQLITE_OK; + } + + /* This (along with lockTable()) is where the ReadUncommitted flag is + ** dealt with. If the caller is querying for a read-lock and the flag is + ** set, it is unconditionally granted - even if there are write-locks + ** on the table. If a write-lock is requested, the ReadUncommitted flag + ** is not considered. + ** + ** In function lockTable(), if a read-lock is demanded and the + ** ReadUncommitted flag is set, no entry is added to the locks list + ** (BtShared.pLock). + ** + ** To summarize: If the ReadUncommitted flag is set, then read cursors do + ** not create or respect table locks. 
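**
** (A brief application-level aside, added for illustration: the flag
** discussed here is normally enabled by calling
** sqlite3_enable_shared_cache(1) before opening the connections involved
** and then issuing "PRAGMA read_uncommitted=1" on the connection that
** wants to read uncommitted data.)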
The locking procedure for a + ** write-cursor does not change. + */ + if( + !p->pSqlite || + 0==(p->pSqlite->flags&SQLITE_ReadUncommitted) || + eLock==WRITE_LOCK || + iTab==MASTER_ROOT + ){ + for(pIter=pBt->pLock; pIter; pIter=pIter->pNext){ + if( pIter->pBtree!=p && pIter->iTable==iTab && + (pIter->eLock!=eLock || eLock!=READ_LOCK) ){ + return SQLITE_LOCKED; + } + } + } + return SQLITE_OK; +} +#endif /* !SQLITE_OMIT_SHARED_CACHE */ + +#ifndef SQLITE_OMIT_SHARED_CACHE +/* +** Add a lock on the table with root-page iTable to the shared-btree used +** by Btree handle p. Parameter eLock must be either READ_LOCK or +** WRITE_LOCK. +** +** SQLITE_OK is returned if the lock is added successfully. SQLITE_BUSY and +** SQLITE_NOMEM may also be returned. +*/ +static int lockTable(Btree *p, Pgno iTable, u8 eLock){ + BtShared *pBt = p->pBt; + BtLock *pLock = 0; + BtLock *pIter; + + assert( sqlite3BtreeHoldsMutex(p) ); + + /* This is a no-op if the shared-cache is not enabled */ + if( !p->sharable ){ + return SQLITE_OK; + } + + assert( SQLITE_OK==queryTableLock(p, iTable, eLock) ); + + /* If the read-uncommitted flag is set and a read-lock is requested, + ** return early without adding an entry to the BtShared.pLock list. See + ** comment in function queryTableLock() for more info on handling + ** the ReadUncommitted flag. + */ + if( + (p->pSqlite) && + (p->pSqlite->flags&SQLITE_ReadUncommitted) && + (eLock==READ_LOCK) && + iTable!=MASTER_ROOT + ){ + return SQLITE_OK; + } + + /* First search the list for an existing lock on this table. */ + for(pIter=pBt->pLock; pIter; pIter=pIter->pNext){ + if( pIter->iTable==iTable && pIter->pBtree==p ){ + pLock = pIter; + break; + } + } + + /* If the above search did not find a BtLock struct associating Btree p + ** with table iTable, allocate one and link it into the list. + */ + if( !pLock ){ + pLock = (BtLock *)sqlite3MallocZero(sizeof(BtLock)); + if( !pLock ){ + return SQLITE_NOMEM; + } + pLock->iTable = iTable; + pLock->pBtree = p; + pLock->pNext = pBt->pLock; + pBt->pLock = pLock; + } + + /* Set the BtLock.eLock variable to the maximum of the current lock + ** and the requested lock. This means if a write-lock was already held + ** and a read-lock requested, we don't incorrectly downgrade the lock. + */ + assert( WRITE_LOCK>READ_LOCK ); + if( eLock>pLock->eLock ){ + pLock->eLock = eLock; + } + + return SQLITE_OK; +} +#endif /* !SQLITE_OMIT_SHARED_CACHE */ + +#ifndef SQLITE_OMIT_SHARED_CACHE +/* +** Release all the table locks (locks obtained via calls to the lockTable() +** procedure) held by Btree handle p. +*/ +static void unlockAllTables(Btree *p){ + BtLock **ppIter = &p->pBt->pLock; + + assert( sqlite3BtreeHoldsMutex(p) ); + assert( p->sharable || 0==*ppIter ); + + while( *ppIter ){ + BtLock *pLock = *ppIter; + if( pLock->pBtree==p ){ + *ppIter = pLock->pNext; + sqlite3_free(pLock); + }else{ + ppIter = &pLock->pNext; + } + } +} +#endif /* SQLITE_OMIT_SHARED_CACHE */ + +static void releasePage(MemPage *pPage); /* Forward reference */ + +/* +** Verify that the cursor holds a mutex on the BtShared +*/ +#ifndef NDEBUG +static int cursorHoldsMutex(BtCursor *p){ + return sqlite3_mutex_held(p->pBt->mutex); +} +#endif + + +#ifndef SQLITE_OMIT_INCRBLOB +/* +** Invalidate the overflow page-list cache for cursor pCur, if any. 
+*/ +static void invalidateOverflowCache(BtCursor *pCur){ + assert( cursorHoldsMutex(pCur) ); + sqlite3_free(pCur->aOverflow); + pCur->aOverflow = 0; +} + +/* +** Invalidate the overflow page-list cache for all cursors opened +** on the shared btree structure pBt. +*/ +static void invalidateAllOverflowCache(BtShared *pBt){ + BtCursor *p; + assert( sqlite3_mutex_held(pBt->mutex) ); + for(p=pBt->pCursor; p; p=p->pNext){ + invalidateOverflowCache(p); + } +} +#else + #define invalidateOverflowCache(x) + #define invalidateAllOverflowCache(x) +#endif + +/* +** Save the current cursor position in the variables BtCursor.nKey +** and BtCursor.pKey. The cursor's state is set to CURSOR_REQUIRESEEK. +*/ +static int saveCursorPosition(BtCursor *pCur){ + int rc; + + assert( CURSOR_VALID==pCur->eState ); + assert( 0==pCur->pKey ); + assert( cursorHoldsMutex(pCur) ); + + rc = sqlite3BtreeKeySize(pCur, &pCur->nKey); + + /* If this is an intKey table, then the above call to BtreeKeySize() + ** stores the integer key in pCur->nKey. In this case this value is + ** all that is required. Otherwise, if pCur is not open on an intKey + ** table, then malloc space for and store the pCur->nKey bytes of key + ** data. + */ + if( rc==SQLITE_OK && 0==pCur->pPage->intKey){ + void *pKey = sqlite3_malloc(pCur->nKey); + if( pKey ){ + rc = sqlite3BtreeKey(pCur, 0, pCur->nKey, pKey); + if( rc==SQLITE_OK ){ + pCur->pKey = pKey; + }else{ + sqlite3_free(pKey); + } + }else{ + rc = SQLITE_NOMEM; + } + } + assert( !pCur->pPage->intKey || !pCur->pKey ); + + if( rc==SQLITE_OK ){ + releasePage(pCur->pPage); + pCur->pPage = 0; + pCur->eState = CURSOR_REQUIRESEEK; + } + + invalidateOverflowCache(pCur); + return rc; +} + +/* +** Save the positions of all cursors except pExcept open on the table +** with root-page iRoot. Usually, this is called just before cursor +** pExcept is used to modify the table (BtreeDelete() or BtreeInsert()). +*/ +static int saveAllCursors(BtShared *pBt, Pgno iRoot, BtCursor *pExcept){ + BtCursor *p; + assert( sqlite3_mutex_held(pBt->mutex) ); + assert( pExcept==0 || pExcept->pBt==pBt ); + for(p=pBt->pCursor; p; p=p->pNext){ + if( p!=pExcept && (0==iRoot || p->pgnoRoot==iRoot) && + p->eState==CURSOR_VALID ){ + int rc = saveCursorPosition(p); + if( SQLITE_OK!=rc ){ + return rc; + } + } + } + return SQLITE_OK; +} + +/* +** Clear the current cursor position. +*/ +static void clearCursorPosition(BtCursor *pCur){ + assert( cursorHoldsMutex(pCur) ); + sqlite3_free(pCur->pKey); + pCur->pKey = 0; + pCur->eState = CURSOR_INVALID; +} + +/* +** Restore the cursor to the position it was in (or as close to as possible) +** when saveCursorPosition() was called. Note that this call deletes the +** saved position info stored by saveCursorPosition(), so there can be +** at most one effective restoreOrClearCursorPosition() call after each +** saveCursorPosition(). +** +** If the second argument argument - doSeek - is false, then instead of +** returning the cursor to it's saved position, any saved position is deleted +** and the cursor state set to CURSOR_INVALID. 
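**
** (An illustrative walk-through, not part of the original comment: when a
** cursor is about to modify a table via BtreeInsert() or BtreeDelete(),
** saveAllCursors() records the current key of every other cursor open on
** that table and moves them to the CURSOR_REQUIRESEEK state; the next
** operation on such a cursor comes through this routine, which seeks back
** to the saved key before continuing.)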
+*/ +int sqlite3BtreeRestoreOrClearCursorPosition(BtCursor *pCur){ + int rc; + assert( cursorHoldsMutex(pCur) ); + assert( pCur->eState>=CURSOR_REQUIRESEEK ); + if( pCur->eState==CURSOR_FAULT ){ + return pCur->skip; + } +#ifndef SQLITE_OMIT_INCRBLOB + if( pCur->isIncrblobHandle ){ + return SQLITE_ABORT; + } +#endif + pCur->eState = CURSOR_INVALID; + rc = sqlite3BtreeMoveto(pCur, pCur->pKey, pCur->nKey, 0, &pCur->skip); + if( rc==SQLITE_OK ){ + sqlite3_free(pCur->pKey); + pCur->pKey = 0; + assert( pCur->eState==CURSOR_VALID || pCur->eState==CURSOR_INVALID ); + } + return rc; +} + +#define restoreOrClearCursorPosition(p) \ + (p->eState>=CURSOR_REQUIRESEEK ? \ + sqlite3BtreeRestoreOrClearCursorPosition(p) : \ + SQLITE_OK) + +#ifndef SQLITE_OMIT_AUTOVACUUM +/* +** Given a page number of a regular database page, return the page +** number for the pointer-map page that contains the entry for the +** input page number. +*/ +static Pgno ptrmapPageno(BtShared *pBt, Pgno pgno){ + int nPagesPerMapPage, iPtrMap, ret; + assert( sqlite3_mutex_held(pBt->mutex) ); + nPagesPerMapPage = (pBt->usableSize/5)+1; + iPtrMap = (pgno-2)/nPagesPerMapPage; + ret = (iPtrMap*nPagesPerMapPage) + 2; + if( ret==PENDING_BYTE_PAGE(pBt) ){ + ret++; + } + return ret; +} + +/* +** Write an entry into the pointer map. +** +** This routine updates the pointer map entry for page number 'key' +** so that it maps to type 'eType' and parent page number 'pgno'. +** An error code is returned if something goes wrong, otherwise SQLITE_OK. +*/ +static int ptrmapPut(BtShared *pBt, Pgno key, u8 eType, Pgno parent){ + DbPage *pDbPage; /* The pointer map page */ + u8 *pPtrmap; /* The pointer map data */ + Pgno iPtrmap; /* The pointer map page number */ + int offset; /* Offset in pointer map page */ + int rc; + + assert( sqlite3_mutex_held(pBt->mutex) ); + /* The master-journal page number must never be used as a pointer map page */ + assert( 0==PTRMAP_ISPAGE(pBt, PENDING_BYTE_PAGE(pBt)) ); + + assert( pBt->autoVacuum ); + if( key==0 ){ + return SQLITE_CORRUPT_BKPT; + } + iPtrmap = PTRMAP_PAGENO(pBt, key); + rc = sqlite3PagerGet(pBt->pPager, iPtrmap, &pDbPage); + if( rc!=SQLITE_OK ){ + return rc; + } + offset = PTRMAP_PTROFFSET(pBt, key); + pPtrmap = (u8 *)sqlite3PagerGetData(pDbPage); + + if( eType!=pPtrmap[offset] || get4byte(&pPtrmap[offset+1])!=parent ){ + TRACE(("PTRMAP_UPDATE: %d->(%d,%d)\n", key, eType, parent)); + rc = sqlite3PagerWrite(pDbPage); + if( rc==SQLITE_OK ){ + pPtrmap[offset] = eType; + put4byte(&pPtrmap[offset+1], parent); + } + } + + sqlite3PagerUnref(pDbPage); + return rc; +} + +/* +** Read an entry from the pointer map. +** +** This routine retrieves the pointer map entry for page 'key', writing +** the type and parent page number to *pEType and *pPgno respectively. +** An error code is returned if something goes wrong, otherwise SQLITE_OK. 
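+**
+** As a rough worked example of the page-number arithmetic involved (see
+** ptrmapPageno() above, and ignoring the PENDING_BYTE adjustment): with a
+** usable page size of 1024 bytes each pointer-map page holds 1024/5 = 204
+** five-byte entries, so pointer-map pages land on page 2, page 207,
+** page 412 and so on, and the entry for any given page is read from the
+** nearest pointer-map page below it.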
+*/
+static int ptrmapGet(BtShared *pBt, Pgno key, u8 *pEType, Pgno *pPgno){
+  DbPage *pDbPage;   /* The pointer map page */
+  int iPtrmap;       /* Pointer map page index */
+  u8 *pPtrmap;       /* Pointer map page data */
+  int offset;        /* Offset of entry in pointer map */
+  int rc;
+
+  assert( sqlite3_mutex_held(pBt->mutex) );
+
+  iPtrmap = PTRMAP_PAGENO(pBt, key);
+  rc = sqlite3PagerGet(pBt->pPager, iPtrmap, &pDbPage);
+  if( rc!=0 ){
+    return rc;
+  }
+  pPtrmap = (u8 *)sqlite3PagerGetData(pDbPage);
+
+  offset = PTRMAP_PTROFFSET(pBt, key);
+  assert( pEType!=0 );
+  *pEType = pPtrmap[offset];
+  if( pPgno ) *pPgno = get4byte(&pPtrmap[offset+1]);
+
+  sqlite3PagerUnref(pDbPage);
+  if( *pEType<1 || *pEType>5 ) return SQLITE_CORRUPT_BKPT;
+  return SQLITE_OK;
+}
+
+#endif /* SQLITE_OMIT_AUTOVACUUM */
+
+/*
+** Given a btree page and a cell index (0 means the first cell on
+** the page, 1 means the second cell, and so forth) return a pointer
+** to the cell content.
+**
+** This routine works only for pages that do not contain overflow cells.
+*/
+#define findCell(pPage, iCell) \
+  ((pPage)->aData + get2byte(&(pPage)->aData[(pPage)->cellOffset+2*(iCell)]))
+#ifdef SQLITE_TEST
+u8 *sqlite3BtreeFindCell(MemPage *pPage, int iCell){
+  assert( iCell>=0 );
+  assert( iCell<get2byte(&pPage->aData[pPage->hdrOffset+3]) );
+  return findCell(pPage, iCell);
+}
+#endif
+
+/*
+** This is a more complex version of sqlite3BtreeFindCell() that works for
+** pages that do contain overflow cells. See insert
+*/
+static u8 *findOverflowCell(MemPage *pPage, int iCell){
+  int i;
+  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+  for(i=pPage->nOverflow-1; i>=0; i--){
+    int k;
+    struct _OvflCell *pOvfl;
+    pOvfl = &pPage->aOvfl[i];
+    k = pOvfl->idx;
+    if( k<=iCell ){
+      if( k==iCell ){
+        return pOvfl->pCell;
+      }
+      iCell--;
+    }
+  }
+  return findCell(pPage, iCell);
+}
+
+/*
+** Parse a cell content block and fill in the CellInfo structure. There
+** are two versions of this function. sqlite3BtreeParseCell() takes a
+** cell index as the second argument and sqlite3BtreeParseCellPtr()
+** takes a pointer to the body of the cell as its second argument.
+**
+** Within this file, the parseCell() macro can be called instead of
+** sqlite3BtreeParseCellPtr(). Using some compilers, this will be faster.
+*/
+void sqlite3BtreeParseCellPtr(
+  MemPage *pPage,         /* Page containing the cell */
+  u8 *pCell,              /* Pointer to the cell text. */
+  CellInfo *pInfo         /* Fill in this structure */
+){
+  int n;                  /* Number bytes in cell content header */
+  u32 nPayload;           /* Number of bytes of cell payload */
+
+  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+
+  pInfo->pCell = pCell;
+  assert( pPage->leaf==0 || pPage->leaf==1 );
+  n = pPage->childPtrSize;
+  assert( n==4-4*pPage->leaf );
+  if( pPage->hasData ){
+    n += getVarint32(&pCell[n], &nPayload);
+  }else{
+    nPayload = 0;
+  }
+  pInfo->nData = nPayload;
+  if( pPage->intKey ){
+    n += getVarint(&pCell[n], (u64 *)&pInfo->nKey);
+  }else{
+    u32 x;
+    n += getVarint32(&pCell[n], &x);
+    pInfo->nKey = x;
+    nPayload += x;
+  }
+  pInfo->nPayload = nPayload;
+  pInfo->nHeader = n;
+  if( nPayload<=pPage->maxLocal ){
+    /* This is the (easy) common case where the entire payload fits
+    ** on the local page.  No overflow is required.
+ */ + int nSize; /* Total size of cell content in bytes */ + pInfo->nLocal = nPayload; + pInfo->iOverflow = 0; + nSize = nPayload + n; + if( nSize<4 ){ + nSize = 4; /* Minimum cell size is 4 */ + } + pInfo->nSize = nSize; + }else{ + /* If the payload will not fit completely on the local page, we have + ** to decide how much to store locally and how much to spill onto + ** overflow pages. The strategy is to minimize the amount of unused + ** space on overflow pages while keeping the amount of local storage + ** in between minLocal and maxLocal. + ** + ** Warning: changing the way overflow payload is distributed in any + ** way will result in an incompatible file format. + */ + int minLocal; /* Minimum amount of payload held locally */ + int maxLocal; /* Maximum amount of payload held locally */ + int surplus; /* Overflow payload available for local storage */ + + minLocal = pPage->minLocal; + maxLocal = pPage->maxLocal; + surplus = minLocal + (nPayload - minLocal)%(pPage->pBt->usableSize - 4); + if( surplus <= maxLocal ){ + pInfo->nLocal = surplus; + }else{ + pInfo->nLocal = minLocal; + } + pInfo->iOverflow = pInfo->nLocal + n; + pInfo->nSize = pInfo->iOverflow + 4; + } +} +#define parseCell(pPage, iCell, pInfo) \ + sqlite3BtreeParseCellPtr((pPage), findCell((pPage), (iCell)), (pInfo)) +void sqlite3BtreeParseCell( + MemPage *pPage, /* Page containing the cell */ + int iCell, /* The cell index. First cell is 0 */ + CellInfo *pInfo /* Fill in this structure */ +){ + parseCell(pPage, iCell, pInfo); +} + +/* +** Compute the total number of bytes that a Cell needs in the cell +** data area of the btree-page. The return number includes the cell +** data header and the local payload, but not any overflow page or +** the space used by the cell pointer. +*/ +#ifndef NDEBUG +static int cellSize(MemPage *pPage, int iCell){ + CellInfo info; + sqlite3BtreeParseCell(pPage, iCell, &info); + return info.nSize; +} +#endif +static int cellSizePtr(MemPage *pPage, u8 *pCell){ + CellInfo info; + sqlite3BtreeParseCellPtr(pPage, pCell, &info); + return info.nSize; +} + +#ifndef SQLITE_OMIT_AUTOVACUUM +/* +** If the cell pCell, part of page pPage contains a pointer +** to an overflow page, insert an entry into the pointer-map +** for the overflow page. +*/ +static int ptrmapPutOvflPtr(MemPage *pPage, u8 *pCell){ + if( pCell ){ + CellInfo info; + sqlite3BtreeParseCellPtr(pPage, pCell, &info); + assert( (info.nData+(pPage->intKey?0:info.nKey))==info.nPayload ); + if( (info.nData+(pPage->intKey?0:info.nKey))>info.nLocal ){ + Pgno ovfl = get4byte(&pCell[info.iOverflow]); + return ptrmapPut(pPage->pBt, ovfl, PTRMAP_OVERFLOW1, pPage->pgno); + } + } + return SQLITE_OK; +} +/* +** If the cell with index iCell on page pPage contains a pointer +** to an overflow page, insert an entry into the pointer-map +** for the overflow page. +*/ +static int ptrmapPutOvfl(MemPage *pPage, int iCell){ + u8 *pCell; + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + pCell = findOverflowCell(pPage, iCell); + return ptrmapPutOvflPtr(pPage, pCell); +} +#endif + + +/* +** Defragment the page given. All Cells are moved to the +** end of the page and all free space is collected into one +** big FreeBlk that occurs in between the header and cell +** pointer array and the cell content area. 
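+**
+** In other words, after this routine runs the page is laid out roughly as
+**
+**     | header | cell pointer array | single gap of free space | cell content |
+**
+** with the freelist emptied and the fragmented-byte counter reset to zero.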
+*/
+static int defragmentPage(MemPage *pPage){
+  int i;                     /* Loop counter */
+  int pc;                    /* Address of the i-th cell */
+  int addr;                  /* Offset of first byte after cell pointer array */
+  int hdr;                   /* Offset to the page header */
+  int size;                  /* Size of a cell */
+  int usableSize;            /* Number of usable bytes on a page */
+  int cellOffset;            /* Offset to the cell pointer array */
+  int brk;                   /* Offset to the cell content area */
+  int nCell;                 /* Number of cells on the page */
+  unsigned char *data;       /* The page data */
+  unsigned char *temp;       /* Temp area for cell content */
+
+  assert( sqlite3PagerIswriteable(pPage->pDbPage) );
+  assert( pPage->pBt!=0 );
+  assert( pPage->pBt->usableSize <= SQLITE_MAX_PAGE_SIZE );
+  assert( pPage->nOverflow==0 );
+  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+  temp = sqlite3_malloc( pPage->pBt->pageSize );
+  if( temp==0 ) return SQLITE_NOMEM;
+  data = pPage->aData;
+  hdr = pPage->hdrOffset;
+  cellOffset = pPage->cellOffset;
+  nCell = pPage->nCell;
+  assert( nCell==get2byte(&data[hdr+3]) );
+  usableSize = pPage->pBt->usableSize;
+  brk = get2byte(&data[hdr+5]);
+  memcpy(&temp[brk], &data[brk], usableSize - brk);
+  brk = usableSize;
+  for(i=0; i<nCell; i++){
+    u8 *pAddr;     /* The i-th cell pointer */
+    pAddr = &data[cellOffset + i*2];
+    pc = get2byte(pAddr);
+    assert( pc<pPage->pBt->usableSize );
+    size = cellSizePtr(pPage, &temp[pc]);
+    brk -= size;
+    memcpy(&data[brk], &temp[pc], size);
+    put2byte(pAddr, brk);
+  }
+  assert( brk>=cellOffset+2*nCell );
+  put2byte(&data[hdr+5], brk);
+  data[hdr+1] = 0;
+  data[hdr+2] = 0;
+  data[hdr+7] = 0;
+  addr = cellOffset+2*nCell;
+  memset(&data[addr], 0, brk-addr);
+  sqlite3_free(temp);
+  return SQLITE_OK;
+}
+
+/*
+** Allocate nByte bytes of space on a page.
+**
+** Return the index into pPage->aData[] of the first byte of
+** the new allocation. Or return 0 if there is not enough free
+** space on the page to satisfy the allocation request.
+**
+** If the page contains nByte bytes of free space but does not contain
+** nByte bytes of contiguous free space, then this routine automatically
+** calls defragmentPage() to consolidate all free space before
+** allocating the new chunk.
+*/
+static int allocateSpace(MemPage *pPage, int nByte){
+  int addr, pc, hdr;
+  int size;
+  int nFrag;
+  int top;
+  int nCell;
+  int cellOffset;
+  unsigned char *data;
+
+  data = pPage->aData;
+  assert( sqlite3PagerIswriteable(pPage->pDbPage) );
+  assert( pPage->pBt );
+  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+  if( nByte<4 ) nByte = 4;
+  if( pPage->nFree<nByte || pPage->nOverflow>0 ) return 0;
+  pPage->nFree -= nByte;
+  hdr = pPage->hdrOffset;
+
+  nFrag = data[hdr+7];
+  if( nFrag<60 ){
+    /* Search the freelist looking for a slot big enough to satisfy the
+    ** space request. */
+    addr = hdr+1;
+    while( (pc = get2byte(&data[addr]))>0 ){
+      size = get2byte(&data[pc+2]);
+      if( size>=nByte ){
+        if( size<nByte+4 ){
+          memcpy(&data[addr], &data[pc], 2);
+          data[hdr+7] = nFrag + size - nByte;
+          return pc;
+        }else{
+          put2byte(&data[pc+2], size-nByte);
+          return pc + size - nByte;
+        }
+      }
+      addr = pc;
+    }
+  }
+
+  /* Allocate memory from the gap in between the cell pointer array
+  ** and the cell content area.
+  */
+  top = get2byte(&data[hdr+5]);
+  nCell = get2byte(&data[hdr+3]);
+  cellOffset = pPage->cellOffset;
+  if( nFrag>=60 || cellOffset + 2*nCell > top - nByte ){
+    if( defragmentPage(pPage) ) return 0;
+    top = get2byte(&data[hdr+5]);
+  }
+  top -= nByte;
+  assert( cellOffset + 2*nCell <= top );
+  put2byte(&data[hdr+5], top);
+  return top;
+}
+
+/*
+** Return a section of the pPage->aData to the freelist.
+** The first byte of the new free block is pPage->aData[start]
+** and the size of the block is "size" bytes.
+**
+** Most of the effort here is involved in coalescing adjacent
+** free blocks into a single big free block.
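+**
+** For example, if a 20 byte block starting at offset 100 is freed while a
+** free block already begins at offset 120, the two become one 40 byte block
+** at offset 100. A gap of up to 3 bytes between two blocks does not prevent
+** the merge; such bytes were previously counted as fragments and are
+** reclaimed (the fragmented-byte counter is reduced) when the blocks join.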
+*/
+static void freeSpace(MemPage *pPage, int start, int size){
+  int addr, pbegin, hdr;
+  unsigned char *data = pPage->aData;
+
+  assert( pPage->pBt!=0 );
+  assert( sqlite3PagerIswriteable(pPage->pDbPage) );
+  assert( start>=pPage->hdrOffset+6+(pPage->leaf?0:4) );
+  assert( (start + size)<=pPage->pBt->usableSize );
+  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+  if( size<4 ) size = 4;
+
+#ifdef SQLITE_SECURE_DELETE
+  /* Overwrite deleted information with zeros when the SECURE_DELETE
+  ** option is enabled at compile-time */
+  memset(&data[start], 0, size);
+#endif
+
+  /* Add the space back into the linked list of freeblocks */
+  hdr = pPage->hdrOffset;
+  addr = hdr + 1;
+  while( (pbegin = get2byte(&data[addr]))<start && pbegin>0 ){
+    assert( pbegin<=pPage->pBt->usableSize-4 );
+    assert( pbegin>addr );
+    addr = pbegin;
+  }
+  assert( pbegin<=pPage->pBt->usableSize-4 );
+  assert( pbegin>addr || pbegin==0 );
+  put2byte(&data[addr], start);
+  put2byte(&data[start], pbegin);
+  put2byte(&data[start+2], size);
+  pPage->nFree += size;
+
+  /* Coalesce adjacent free blocks */
+  addr = pPage->hdrOffset + 1;
+  while( (pbegin = get2byte(&data[addr]))>0 ){
+    int pnext, psize;
+    assert( pbegin>addr );
+    assert( pbegin<=pPage->pBt->usableSize-4 );
+    pnext = get2byte(&data[pbegin]);
+    psize = get2byte(&data[pbegin+2]);
+    if( pbegin + psize + 3 >= pnext && pnext>0 ){
+      int frag = pnext - (pbegin+psize);
+      assert( frag<=data[pPage->hdrOffset+7] );
+      data[pPage->hdrOffset+7] -= frag;
+      put2byte(&data[pbegin], get2byte(&data[pnext]));
+      put2byte(&data[pbegin+2], pnext+get2byte(&data[pnext+2])-pbegin);
+    }else{
+      addr = pbegin;
+    }
+  }
+
+  /* If the cell content area begins with a freeblock, remove it. */
+  if( data[hdr+1]==data[hdr+5] && data[hdr+2]==data[hdr+6] ){
+    int top;
+    pbegin = get2byte(&data[hdr+1]);
+    memcpy(&data[hdr+1], &data[pbegin], 2);
+    top = get2byte(&data[hdr+5]);
+    put2byte(&data[hdr+5], top + get2byte(&data[pbegin+2]));
+  }
+}
+
+/*
+** Decode the flags byte (the first byte of the header) for a page
+** and initialize fields of the MemPage structure accordingly.
+*/
+static void decodeFlags(MemPage *pPage, int flagByte){
+  BtShared *pBt;     /* A copy of pPage->pBt */
+
+  assert( pPage->hdrOffset==(pPage->pgno==1 ? 100 : 0) );
+  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+  pPage->intKey = (flagByte & (PTF_INTKEY|PTF_LEAFDATA))!=0;
+  pPage->zeroData = (flagByte & PTF_ZERODATA)!=0;
+  pPage->leaf = (flagByte & PTF_LEAF)!=0;
+  pPage->childPtrSize = 4*(pPage->leaf==0);
+  pBt = pPage->pBt;
+  if( flagByte & PTF_LEAFDATA ){
+    pPage->leafData = 1;
+    pPage->maxLocal = pBt->maxLeaf;
+    pPage->minLocal = pBt->minLeaf;
+  }else{
+    pPage->leafData = 0;
+    pPage->maxLocal = pBt->maxLocal;
+    pPage->minLocal = pBt->minLocal;
+  }
+  pPage->hasData = !(pPage->zeroData || (!pPage->leaf && pPage->leafData));
+}
+
+/*
+** Initialize the auxiliary information for a disk block.
+**
+** The pParent parameter must be a pointer to the MemPage which
+** is the parent of the page being initialized. The root of a
+** BTree has no parent and so for that page, pParent==NULL.
+**
+** Return SQLITE_OK on success. If we see that the page does
+** not contain a well-formed database page, then return
+** SQLITE_CORRUPT. Note that a return of SQLITE_OK does not
+** guarantee that the page is well-formed. It only shows that
+** we failed to detect any corruption.
+*/
+int sqlite3BtreeInitPage(
+  MemPage *pPage,        /* The page to be initialized */
+  MemPage *pParent       /* The parent.
Might be NULL */ +){ + int pc; /* Address of a freeblock within pPage->aData[] */ + int hdr; /* Offset to beginning of page header */ + u8 *data; /* Equal to pPage->aData */ + BtShared *pBt; /* The main btree structure */ + int usableSize; /* Amount of usable space on each page */ + int cellOffset; /* Offset from start of page to first cell pointer */ + int nFree; /* Number of unused bytes on the page */ + int top; /* First byte of the cell content area */ + + pBt = pPage->pBt; + assert( pBt!=0 ); + assert( pParent==0 || pParent->pBt==pBt ); + assert( sqlite3_mutex_held(pBt->mutex) ); + assert( pPage->pgno==sqlite3PagerPagenumber(pPage->pDbPage) ); + assert( pPage == sqlite3PagerGetExtra(pPage->pDbPage) ); + assert( pPage->aData == sqlite3PagerGetData(pPage->pDbPage) ); + if( pPage->pParent!=pParent && (pPage->pParent!=0 || pPage->isInit) ){ + /* The parent page should never change unless the file is corrupt */ + return SQLITE_CORRUPT_BKPT; + } + if( pPage->isInit ) return SQLITE_OK; + if( pPage->pParent==0 && pParent!=0 ){ + pPage->pParent = pParent; + sqlite3PagerRef(pParent->pDbPage); + } + hdr = pPage->hdrOffset; + data = pPage->aData; + decodeFlags(pPage, data[hdr]); + pPage->nOverflow = 0; + pPage->idxShift = 0; + usableSize = pBt->usableSize; + pPage->cellOffset = cellOffset = hdr + 12 - 4*pPage->leaf; + top = get2byte(&data[hdr+5]); + pPage->nCell = get2byte(&data[hdr+3]); + if( pPage->nCell>MX_CELL(pBt) ){ + /* To many cells for a single page. The page must be corrupt */ + return SQLITE_CORRUPT_BKPT; + } + if( pPage->nCell==0 && pParent!=0 && pParent->pgno!=1 ){ + /* All pages must have at least one cell, except for root pages */ + return SQLITE_CORRUPT_BKPT; + } + + /* Compute the total free space on the page */ + pc = get2byte(&data[hdr+1]); + nFree = data[hdr+7] + top - (cellOffset + 2*pPage->nCell); + while( pc>0 ){ + int next, size; + if( pc>usableSize-4 ){ + /* Free block is off the page */ + return SQLITE_CORRUPT_BKPT; + } + next = get2byte(&data[pc]); + size = get2byte(&data[pc+2]); + if( next>0 && next<=pc+size+3 ){ + /* Free blocks must be in accending order */ + return SQLITE_CORRUPT_BKPT; + } + nFree += size; + pc = next; + } + pPage->nFree = nFree; + if( nFree>=usableSize ){ + /* Free space cannot exceed total page size */ + return SQLITE_CORRUPT_BKPT; + } + + pPage->isInit = 1; + return SQLITE_OK; +} + +/* +** Set up a raw page so that it looks like a database page holding +** no entries. +*/ +static void zeroPage(MemPage *pPage, int flags){ + unsigned char *data = pPage->aData; + BtShared *pBt = pPage->pBt; + int hdr = pPage->hdrOffset; + int first; + + assert( sqlite3PagerPagenumber(pPage->pDbPage)==pPage->pgno ); + assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage ); + assert( sqlite3PagerGetData(pPage->pDbPage) == data ); + assert( sqlite3PagerIswriteable(pPage->pDbPage) ); + assert( sqlite3_mutex_held(pBt->mutex) ); + memset(&data[hdr], 0, pBt->usableSize - hdr); + data[hdr] = flags; + first = hdr + 8 + 4*((flags&PTF_LEAF)==0); + memset(&data[hdr+1], 0, 4); + data[hdr+7] = 0; + put2byte(&data[hdr+5], pBt->usableSize); + pPage->nFree = pBt->usableSize - first; + decodeFlags(pPage, flags); + pPage->hdrOffset = hdr; + pPage->cellOffset = first; + pPage->nOverflow = 0; + pPage->idxShift = 0; + pPage->nCell = 0; + pPage->isInit = 1; +} + +/* +** Get a page from the pager. Initialize the MemPage.pBt and +** MemPage.aData elements if needed. +** +** If the noContent flag is set, it means that we do not care about +** the content of the page at this time. 
So do not go to the disk +** to fetch the content. Just fill in the content with zeros for now. +** If in the future we call sqlite3PagerWrite() on this page, that +** means we have started to be concerned about content and the disk +** read should occur at that point. +*/ +int sqlite3BtreeGetPage( + BtShared *pBt, /* The btree */ + Pgno pgno, /* Number of the page to fetch */ + MemPage **ppPage, /* Return the page in this parameter */ + int noContent /* Do not load page content if true */ +){ + int rc; + MemPage *pPage; + DbPage *pDbPage; + + assert( sqlite3_mutex_held(pBt->mutex) ); + rc = sqlite3PagerAcquire(pBt->pPager, pgno, (DbPage**)&pDbPage, noContent); + if( rc ) return rc; + pPage = (MemPage *)sqlite3PagerGetExtra(pDbPage); + pPage->aData = sqlite3PagerGetData(pDbPage); + pPage->pDbPage = pDbPage; + pPage->pBt = pBt; + pPage->pgno = pgno; + pPage->hdrOffset = pPage->pgno==1 ? 100 : 0; + *ppPage = pPage; + return SQLITE_OK; +} + +/* +** Get a page from the pager and initialize it. This routine +** is just a convenience wrapper around separate calls to +** sqlite3BtreeGetPage() and sqlite3BtreeInitPage(). +*/ +static int getAndInitPage( + BtShared *pBt, /* The database file */ + Pgno pgno, /* Number of the page to get */ + MemPage **ppPage, /* Write the page pointer here */ + MemPage *pParent /* Parent of the page */ +){ + int rc; + assert( sqlite3_mutex_held(pBt->mutex) ); + if( pgno==0 ){ + return SQLITE_CORRUPT_BKPT; + } + rc = sqlite3BtreeGetPage(pBt, pgno, ppPage, 0); + if( rc==SQLITE_OK && (*ppPage)->isInit==0 ){ + rc = sqlite3BtreeInitPage(*ppPage, pParent); + } + return rc; +} + +/* +** Release a MemPage. This should be called once for each prior +** call to sqlite3BtreeGetPage. +*/ +static void releasePage(MemPage *pPage){ + if( pPage ){ + assert( pPage->aData ); + assert( pPage->pBt ); + assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage ); + assert( sqlite3PagerGetData(pPage->pDbPage)==pPage->aData ); + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + sqlite3PagerUnref(pPage->pDbPage); + } +} + +/* +** This routine is called when the reference count for a page +** reaches zero. We need to unref the pParent pointer when that +** happens. +*/ +static void pageDestructor(DbPage *pData, int pageSize){ + MemPage *pPage; + assert( (pageSize & 7)==0 ); + pPage = (MemPage *)sqlite3PagerGetExtra(pData); + assert( pPage->isInit==0 || sqlite3_mutex_held(pPage->pBt->mutex) ); + if( pPage->pParent ){ + MemPage *pParent = pPage->pParent; + assert( pParent->pBt==pPage->pBt ); + pPage->pParent = 0; + releasePage(pParent); + } + pPage->isInit = 0; +} + +/* +** During a rollback, when the pager reloads information into the cache +** so that the cache is restored to its original state at the start of +** the transaction, for each page restored this routine is called. +** +** This routine needs to reset the extra data section at the end of the +** page to agree with the restored data. +*/ +static void pageReinit(DbPage *pData, int pageSize){ + MemPage *pPage; + assert( (pageSize & 7)==0 ); + pPage = (MemPage *)sqlite3PagerGetExtra(pData); + if( pPage->isInit ){ + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + pPage->isInit = 0; + sqlite3BtreeInitPage(pPage, pPage->pParent); + } +} + +/* +** Open a database file. +** +** zFilename is the name of the database file. If zFilename is NULL +** a new database with a random name is created. This randomly named +** database file will be deleted when sqlite3BtreeClose() is called. 
+** If zFilename is ":memory:" then an in-memory database is created +** that is automatically destroyed when it is closed. +*/ +int sqlite3BtreeOpen( + const char *zFilename, /* Name of the file containing the BTree database */ + sqlite3 *pSqlite, /* Associated database handle */ + Btree **ppBtree, /* Pointer to new Btree object written here */ + int flags, /* Options */ + int vfsFlags /* Flags passed through to sqlite3_vfs.xOpen() */ +){ + sqlite3_vfs *pVfs; /* The VFS to use for this btree */ + BtShared *pBt = 0; /* Shared part of btree structure */ + Btree *p; /* Handle to return */ + int rc = SQLITE_OK; + int nReserve; + unsigned char zDbHeader[100]; + + /* Set the variable isMemdb to true for an in-memory database, or + ** false for a file-based database. This symbol is only required if + ** either of the shared-data or autovacuum features are compiled + ** into the library. + */ +#if !defined(SQLITE_OMIT_SHARED_CACHE) || !defined(SQLITE_OMIT_AUTOVACUUM) + #ifdef SQLITE_OMIT_MEMORYDB + const int isMemdb = 0; + #else + const int isMemdb = zFilename && !strcmp(zFilename, ":memory:"); + #endif +#endif + + assert( pSqlite!=0 ); + assert( sqlite3_mutex_held(pSqlite->mutex) ); + + pVfs = pSqlite->pVfs; + p = sqlite3MallocZero(sizeof(Btree)); + if( !p ){ + return SQLITE_NOMEM; + } + p->inTrans = TRANS_NONE; + p->pSqlite = pSqlite; + +#if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO) + /* + ** If this Btree is a candidate for shared cache, try to find an + ** existing BtShared object that we can share with + */ + if( (flags & BTREE_PRIVATE)==0 + && isMemdb==0 + && (pSqlite->flags & SQLITE_Vtab)==0 + && zFilename && zFilename[0] + ){ + if( sqlite3SharedCacheEnabled ){ + int nFullPathname = pVfs->mxPathname+1; + char *zFullPathname = (char *)sqlite3_malloc(nFullPathname); + sqlite3_mutex *mutexShared; + p->sharable = 1; + if( pSqlite ){ + pSqlite->flags |= SQLITE_SharedCache; + } + if( !zFullPathname ){ + sqlite3_free(p); + return SQLITE_NOMEM; + } + sqlite3OsFullPathname(pVfs, zFilename, nFullPathname, zFullPathname); + mutexShared = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER); + sqlite3_mutex_enter(mutexShared); + for(pBt=sqlite3SharedCacheList; pBt; pBt=pBt->pNext){ + assert( pBt->nRef>0 ); + if( 0==strcmp(zFullPathname, sqlite3PagerFilename(pBt->pPager)) + && sqlite3PagerVfs(pBt->pPager)==pVfs ){ + p->pBt = pBt; + pBt->nRef++; + break; + } + } + sqlite3_mutex_leave(mutexShared); + sqlite3_free(zFullPathname); + } +#ifdef SQLITE_DEBUG + else{ + /* In debug mode, we mark all persistent databases as sharable + ** even when they are not. This exercises the locking code and + ** gives more opportunity for asserts(sqlite3_mutex_held()) + ** statements to find locking problems. + */ + p->sharable = 1; + } +#endif + } +#endif + if( pBt==0 ){ + /* + ** The following asserts make sure that structures used by the btree are + ** the right size. This is to guard against size changes that result + ** when compiling on a different architecture. 
+ */ + assert( sizeof(i64)==8 || sizeof(i64)==4 ); + assert( sizeof(u64)==8 || sizeof(u64)==4 ); + assert( sizeof(u32)==4 ); + assert( sizeof(u16)==2 ); + assert( sizeof(Pgno)==4 ); + + pBt = sqlite3MallocZero( sizeof(*pBt) ); + if( pBt==0 ){ + rc = SQLITE_NOMEM; + goto btree_open_out; + } + rc = sqlite3PagerOpen(pVfs, &pBt->pPager, zFilename, + EXTRA_SIZE, flags, vfsFlags); + if( rc==SQLITE_OK ){ + rc = sqlite3PagerReadFileheader(pBt->pPager,sizeof(zDbHeader),zDbHeader); + } + if( rc!=SQLITE_OK ){ + goto btree_open_out; + } + p->pBt = pBt; + + sqlite3PagerSetDestructor(pBt->pPager, pageDestructor); + sqlite3PagerSetReiniter(pBt->pPager, pageReinit); + pBt->pCursor = 0; + pBt->pPage1 = 0; + pBt->readOnly = sqlite3PagerIsreadonly(pBt->pPager); + pBt->pageSize = get2byte(&zDbHeader[16]); + if( pBt->pageSize<512 || pBt->pageSize>SQLITE_MAX_PAGE_SIZE + || ((pBt->pageSize-1)&pBt->pageSize)!=0 ){ + pBt->pageSize = 0; + sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize); + pBt->maxEmbedFrac = 64; /* 25% */ + pBt->minEmbedFrac = 32; /* 12.5% */ + pBt->minLeafFrac = 32; /* 12.5% */ +#ifndef SQLITE_OMIT_AUTOVACUUM + /* If the magic name ":memory:" will create an in-memory database, then + ** leave the autoVacuum mode at 0 (do not auto-vacuum), even if + ** SQLITE_DEFAULT_AUTOVACUUM is true. On the other hand, if + ** SQLITE_OMIT_MEMORYDB has been defined, then ":memory:" is just a + ** regular file-name. In this case the auto-vacuum applies as per normal. + */ + if( zFilename && !isMemdb ){ + pBt->autoVacuum = (SQLITE_DEFAULT_AUTOVACUUM ? 1 : 0); + pBt->incrVacuum = (SQLITE_DEFAULT_AUTOVACUUM==2 ? 1 : 0); + } +#endif + nReserve = 0; + }else{ + nReserve = zDbHeader[20]; + pBt->maxEmbedFrac = zDbHeader[21]; + pBt->minEmbedFrac = zDbHeader[22]; + pBt->minLeafFrac = zDbHeader[23]; + pBt->pageSizeFixed = 1; +#ifndef SQLITE_OMIT_AUTOVACUUM + pBt->autoVacuum = (get4byte(&zDbHeader[36 + 4*4])?1:0); + pBt->incrVacuum = (get4byte(&zDbHeader[36 + 7*4])?1:0); +#endif + } + pBt->usableSize = pBt->pageSize - nReserve; + assert( (pBt->pageSize & 7)==0 ); /* 8-byte alignment of pageSize */ + sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize); + +#if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO) + /* Add the new BtShared object to the linked list sharable BtShareds. + */ + if( p->sharable ){ + sqlite3_mutex *mutexShared; + pBt->nRef = 1; + mutexShared = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER); + if( SQLITE_THREADSAFE ){ + pBt->mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); + if( pBt->mutex==0 ){ + rc = SQLITE_NOMEM; + pSqlite->mallocFailed = 0; + goto btree_open_out; + } + } + sqlite3_mutex_enter(mutexShared); + pBt->pNext = sqlite3SharedCacheList; + sqlite3SharedCacheList = pBt; + sqlite3_mutex_leave(mutexShared); + } +#endif + } + +#if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO) + /* If the new Btree uses a sharable pBtShared, then link the new + ** Btree into the list of all sharable Btrees for the same connection. + ** The list is kept in ascending order by pBt address. 
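+  ** Keeping the list sorted by BtShared address gives the code that must
+  ** hold several shared-cache mutexes at once a single fixed order in
+  ** which to acquire them, which is what prevents deadlocks between
+  ** connections that share more than one cache.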
+  */
+  if( p->sharable ){
+    int i;
+    Btree *pSib;
+    for(i=0; i<pSqlite->nDb; i++){
+      if( (pSib = pSqlite->aDb[i].pBt)!=0 && pSib->sharable ){
+        while( pSib->pPrev ){ pSib = pSib->pPrev; }
+        if( p->pBt<pSib->pBt ){
+          p->pNext = pSib;
+          p->pPrev = 0;
+          pSib->pPrev = p;
+        }else{
+          while( pSib->pNext && pSib->pNext->pBt<p->pBt ){
+            pSib = pSib->pNext;
+          }
+          p->pNext = pSib->pNext;
+          p->pPrev = pSib;
+          if( p->pNext ){
+            p->pNext->pPrev = p;
+          }
+          pSib->pNext = p;
+        }
+        break;
+      }
+    }
+  }
+#endif
+  *ppBtree = p;
+
+btree_open_out:
+  if( rc!=SQLITE_OK ){
+    if( pBt && pBt->pPager ){
+      sqlite3PagerClose(pBt->pPager);
+    }
+    sqlite3_free(pBt);
+    sqlite3_free(p);
+    *ppBtree = 0;
+  }
+  return rc;
+}
+
+/*
+** Decrement the BtShared.nRef counter. When it reaches zero,
+** remove the BtShared structure from the sharing list. Return
+** true if the BtShared.nRef counter reaches zero and return
+** false if it is still positive.
+*/
+static int removeFromSharingList(BtShared *pBt){
+#ifndef SQLITE_OMIT_SHARED_CACHE
+  sqlite3_mutex *pMaster;
+  BtShared *pList;
+  int removed = 0;
+
+  assert( sqlite3_mutex_notheld(pBt->mutex) );
+  pMaster = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER);
+  sqlite3_mutex_enter(pMaster);
+  pBt->nRef--;
+  if( pBt->nRef<=0 ){
+    if( sqlite3SharedCacheList==pBt ){
+      sqlite3SharedCacheList = pBt->pNext;
+    }else{
+      pList = sqlite3SharedCacheList;
+      while( pList && pList->pNext!=pBt ){
+        pList=pList->pNext;
+      }
+      if( pList ){
+        pList->pNext = pBt->pNext;
+      }
+    }
+    if( SQLITE_THREADSAFE ){
+      sqlite3_mutex_free(pBt->mutex);
+    }
+    removed = 1;
+  }
+  sqlite3_mutex_leave(pMaster);
+  return removed;
+#else
+  return 1;
+#endif
+}
+
+/*
+** Close an open database and invalidate all cursors.
+*/
+int sqlite3BtreeClose(Btree *p){
+  BtShared *pBt = p->pBt;
+  BtCursor *pCur;
+
+  /* Close all cursors opened via this handle. */
+  assert( sqlite3_mutex_held(p->pSqlite->mutex) );
+  sqlite3BtreeEnter(p);
+  pCur = pBt->pCursor;
+  while( pCur ){
+    BtCursor *pTmp = pCur;
+    pCur = pCur->pNext;
+    if( pTmp->pBtree==p ){
+      sqlite3BtreeCloseCursor(pTmp);
+    }
+  }
+
+  /* Rollback any active transaction and free the handle structure.
+  ** The call to sqlite3BtreeRollback() drops any table-locks held by
+  ** this handle.
+  */
+  sqlite3BtreeRollback(p);
+  sqlite3BtreeLeave(p);
+
+  /* If there are still other outstanding references to the shared-btree
+  ** structure, return now. The remainder of this procedure cleans
+  ** up the shared-btree.
+  */
+  assert( p->wantToLock==0 && p->locked==0 );
+  if( !p->sharable || removeFromSharingList(pBt) ){
+    /* The pBt is no longer on the sharing list, so we can access
+    ** it without having to hold the mutex.
+    **
+    ** Clean out and delete the BtShared object.
+    */
+    assert( !pBt->pCursor );
+    sqlite3PagerClose(pBt->pPager);
+    if( pBt->xFreeSchema && pBt->pSchema ){
+      pBt->xFreeSchema(pBt->pSchema);
+    }
+    sqlite3_free(pBt->pSchema);
+    sqlite3_free(pBt);
+  }
+
+#ifndef SQLITE_OMIT_SHARED_CACHE
+  assert( p->wantToLock==0 );
+  assert( p->locked==0 );
+  if( p->pPrev ) p->pPrev->pNext = p->pNext;
+  if( p->pNext ) p->pNext->pPrev = p->pPrev;
+#endif
+
+  sqlite3_free(p);
+  return SQLITE_OK;
+}
+
+/*
+** Change the busy handler callback function.
+*/ +int sqlite3BtreeSetBusyHandler(Btree *p, BusyHandler *pHandler){ + BtShared *pBt = p->pBt; + assert( sqlite3_mutex_held(p->pSqlite->mutex) ); + sqlite3BtreeEnter(p); + pBt->pBusyHandler = pHandler; + sqlite3PagerSetBusyhandler(pBt->pPager, pHandler); + sqlite3BtreeLeave(p); + return SQLITE_OK; +} + +/* +** Change the limit on the number of pages allowed in the cache. +** +** The maximum number of cache pages is set to the absolute +** value of mxPage. If mxPage is negative, the pager will +** operate asynchronously - it will not stop to do fsync()s +** to insure data is written to the disk surface before +** continuing. Transactions still work if synchronous is off, +** and the database cannot be corrupted if this program +** crashes. But if the operating system crashes or there is +** an abrupt power failure when synchronous is off, the database +** could be left in an inconsistent and unrecoverable state. +** Synchronous is on by default so database corruption is not +** normally a worry. +*/ +int sqlite3BtreeSetCacheSize(Btree *p, int mxPage){ + BtShared *pBt = p->pBt; + assert( sqlite3_mutex_held(p->pSqlite->mutex) ); + sqlite3BtreeEnter(p); + sqlite3PagerSetCachesize(pBt->pPager, mxPage); + sqlite3BtreeLeave(p); + return SQLITE_OK; +} + +/* +** Change the way data is synced to disk in order to increase or decrease +** how well the database resists damage due to OS crashes and power +** failures. Level 1 is the same as asynchronous (no syncs() occur and +** there is a high probability of damage) Level 2 is the default. There +** is a very low but non-zero probability of damage. Level 3 reduces the +** probability of damage to near zero but with a write performance reduction. +*/ +#ifndef SQLITE_OMIT_PAGER_PRAGMAS +int sqlite3BtreeSetSafetyLevel(Btree *p, int level, int fullSync){ + BtShared *pBt = p->pBt; + assert( sqlite3_mutex_held(p->pSqlite->mutex) ); + sqlite3BtreeEnter(p); + sqlite3PagerSetSafetyLevel(pBt->pPager, level, fullSync); + sqlite3BtreeLeave(p); + return SQLITE_OK; +} +#endif + +/* +** Return TRUE if the given btree is set to safety level 1. In other +** words, return TRUE if no sync() occurs on the disk files. +*/ +int sqlite3BtreeSyncDisabled(Btree *p){ + BtShared *pBt = p->pBt; + int rc; + assert( sqlite3_mutex_held(p->pSqlite->mutex) ); + sqlite3BtreeEnter(p); + assert( pBt && pBt->pPager ); + rc = sqlite3PagerNosync(pBt->pPager); + sqlite3BtreeLeave(p); + return rc; +} + +#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) || !defined(SQLITE_OMIT_VACUUM) +/* +** Change the default pages size and the number of reserved bytes per page. +** +** The page size must be a power of 2 between 512 and 65536. If the page +** size supplied does not meet this constraint then the page size is not +** changed. +** +** Page sizes are constrained to be a power of two so that the region +** of the database file used for locking (beginning at PENDING_BYTE, +** the first byte past the 1GB boundary, 0x40000000) needs to occur +** at the beginning of a page. +** +** If parameter nReserve is less than zero, then the number of reserved +** bytes per page is left unchanged. 
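+**
+** For example, 512, 1024, 2048 and so on up to SQLITE_MAX_PAGE_SIZE are
+** all accepted, while a request for (say) 1000 bytes is silently ignored
+** because it is not a power of two.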
+*/ +int sqlite3BtreeSetPageSize(Btree *p, int pageSize, int nReserve){ + int rc = SQLITE_OK; + BtShared *pBt = p->pBt; + sqlite3BtreeEnter(p); + if( pBt->pageSizeFixed ){ + sqlite3BtreeLeave(p); + return SQLITE_READONLY; + } + if( nReserve<0 ){ + nReserve = pBt->pageSize - pBt->usableSize; + } + if( pageSize>=512 && pageSize<=SQLITE_MAX_PAGE_SIZE && + ((pageSize-1)&pageSize)==0 ){ + assert( (pageSize & 7)==0 ); + assert( !pBt->pPage1 && !pBt->pCursor ); + pBt->pageSize = pageSize; + rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize); + } + pBt->usableSize = pBt->pageSize - nReserve; + sqlite3BtreeLeave(p); + return rc; +} + +/* +** Return the currently defined page size +*/ +int sqlite3BtreeGetPageSize(Btree *p){ + return p->pBt->pageSize; +} +int sqlite3BtreeGetReserve(Btree *p){ + int n; + sqlite3BtreeEnter(p); + n = p->pBt->pageSize - p->pBt->usableSize; + sqlite3BtreeLeave(p); + return n; +} + +/* +** Set the maximum page count for a database if mxPage is positive. +** No changes are made if mxPage is 0 or negative. +** Regardless of the value of mxPage, return the maximum page count. +*/ +int sqlite3BtreeMaxPageCount(Btree *p, int mxPage){ + int n; + sqlite3BtreeEnter(p); + n = sqlite3PagerMaxPageCount(p->pBt->pPager, mxPage); + sqlite3BtreeLeave(p); + return n; +} +#endif /* !defined(SQLITE_OMIT_PAGER_PRAGMAS) || !defined(SQLITE_OMIT_VACUUM) */ + +/* +** Change the 'auto-vacuum' property of the database. If the 'autoVacuum' +** parameter is non-zero, then auto-vacuum mode is enabled. If zero, it +** is disabled. The default value for the auto-vacuum property is +** determined by the SQLITE_DEFAULT_AUTOVACUUM macro. +*/ +int sqlite3BtreeSetAutoVacuum(Btree *p, int autoVacuum){ +#ifdef SQLITE_OMIT_AUTOVACUUM + return SQLITE_READONLY; +#else + BtShared *pBt = p->pBt; + int rc = SQLITE_OK; + int av = (autoVacuum?1:0); + + sqlite3BtreeEnter(p); + if( pBt->pageSizeFixed && av!=pBt->autoVacuum ){ + rc = SQLITE_READONLY; + }else{ + pBt->autoVacuum = av; + } + sqlite3BtreeLeave(p); + return rc; +#endif +} + +/* +** Return the value of the 'auto-vacuum' property. If auto-vacuum is +** enabled 1 is returned. Otherwise 0. +*/ +int sqlite3BtreeGetAutoVacuum(Btree *p){ +#ifdef SQLITE_OMIT_AUTOVACUUM + return BTREE_AUTOVACUUM_NONE; +#else + int rc; + sqlite3BtreeEnter(p); + rc = ( + (!p->pBt->autoVacuum)?BTREE_AUTOVACUUM_NONE: + (!p->pBt->incrVacuum)?BTREE_AUTOVACUUM_FULL: + BTREE_AUTOVACUUM_INCR + ); + sqlite3BtreeLeave(p); + return rc; +#endif +} + + +/* +** Get a reference to pPage1 of the database file. This will +** also acquire a readlock on that file. +** +** SQLITE_OK is returned on success. If the file is not a +** well-formed database file, then SQLITE_CORRUPT is returned. +** SQLITE_BUSY is returned if the database is locked. SQLITE_NOMEM +** is returned if we run out of memory. +*/ +static int lockBtree(BtShared *pBt){ + int rc, pageSize; + MemPage *pPage1; + + assert( sqlite3_mutex_held(pBt->mutex) ); + if( pBt->pPage1 ) return SQLITE_OK; + rc = sqlite3BtreeGetPage(pBt, 1, &pPage1, 0); + if( rc!=SQLITE_OK ) return rc; + + + /* Do some checking to help insure the file we opened really is + ** a valid database file. 
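+  ** The checks that follow look for the 16 byte magic header string, a
+  ** page size that is a power of two within the supported range, and a
+  ** usable page size of at least 500 bytes.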
+ */ + rc = SQLITE_NOTADB; + if( sqlite3PagerPagecount(pBt->pPager)>0 ){ + u8 *page1 = pPage1->aData; + if( memcmp(page1, zMagicHeader, 16)!=0 ){ + goto page1_init_failed; + } + if( page1[18]>1 ){ + pBt->readOnly = 1; + } + if( page1[19]>1 ){ + goto page1_init_failed; + } + pageSize = get2byte(&page1[16]); + if( ((pageSize-1)&pageSize)!=0 || pageSize<512 || + (SQLITE_MAX_PAGE_SIZE<32768 && pageSize>SQLITE_MAX_PAGE_SIZE) + ){ + goto page1_init_failed; + } + assert( (pageSize & 7)==0 ); + pBt->pageSize = pageSize; + pBt->usableSize = pageSize - page1[20]; + if( pBt->usableSize<500 ){ + goto page1_init_failed; + } + pBt->maxEmbedFrac = page1[21]; + pBt->minEmbedFrac = page1[22]; + pBt->minLeafFrac = page1[23]; +#ifndef SQLITE_OMIT_AUTOVACUUM + pBt->autoVacuum = (get4byte(&page1[36 + 4*4])?1:0); + pBt->incrVacuum = (get4byte(&page1[36 + 7*4])?1:0); +#endif + } + + /* maxLocal is the maximum amount of payload to store locally for + ** a cell. Make sure it is small enough so that at least minFanout + ** cells can will fit on one page. We assume a 10-byte page header. + ** Besides the payload, the cell must store: + ** 2-byte pointer to the cell + ** 4-byte child pointer + ** 9-byte nKey value + ** 4-byte nData value + ** 4-byte overflow page pointer + ** So a cell consists of a 2-byte poiner, a header which is as much as + ** 17 bytes long, 0 to N bytes of payload, and an optional 4 byte overflow + ** page pointer. + */ + pBt->maxLocal = (pBt->usableSize-12)*pBt->maxEmbedFrac/255 - 23; + pBt->minLocal = (pBt->usableSize-12)*pBt->minEmbedFrac/255 - 23; + pBt->maxLeaf = pBt->usableSize - 35; + pBt->minLeaf = (pBt->usableSize-12)*pBt->minLeafFrac/255 - 23; + if( pBt->minLocal>pBt->maxLocal || pBt->maxLocal<0 ){ + goto page1_init_failed; + } + assert( pBt->maxLeaf + 23 <= MX_CELL_SIZE(pBt) ); + pBt->pPage1 = pPage1; + return SQLITE_OK; + +page1_init_failed: + releasePage(pPage1); + pBt->pPage1 = 0; + return rc; +} + +/* +** This routine works like lockBtree() except that it also invokes the +** busy callback if there is lock contention. +*/ +static int lockBtreeWithRetry(Btree *pRef){ + int rc = SQLITE_OK; + + assert( sqlite3BtreeHoldsMutex(pRef) ); + if( pRef->inTrans==TRANS_NONE ){ + u8 inTransaction = pRef->pBt->inTransaction; + btreeIntegrity(pRef); + rc = sqlite3BtreeBeginTrans(pRef, 0); + pRef->pBt->inTransaction = inTransaction; + pRef->inTrans = TRANS_NONE; + if( rc==SQLITE_OK ){ + pRef->pBt->nTransaction--; + } + btreeIntegrity(pRef); + } + return rc; +} + + +/* +** If there are no outstanding cursors and we are not in the middle +** of a transaction but there is a read lock on the database, then +** this routine unrefs the first page of the database file which +** has the effect of releasing the read lock. +** +** If there are any outstanding cursors, this routine is a no-op. +** +** If there is a transaction in progress, this routine is a no-op. +*/ +static void unlockBtreeIfUnused(BtShared *pBt){ + assert( sqlite3_mutex_held(pBt->mutex) ); + if( pBt->inTransaction==TRANS_NONE && pBt->pCursor==0 && pBt->pPage1!=0 ){ + if( sqlite3PagerRefcount(pBt->pPager)>=1 ){ + if( pBt->pPage1->aData==0 ){ + MemPage *pPage = pBt->pPage1; + pPage->aData = sqlite3PagerGetData(pPage->pDbPage); + pPage->pBt = pBt; + pPage->pgno = 1; + } + releasePage(pBt->pPage1); + } + pBt->pPage1 = 0; + pBt->inStmt = 0; + } +} + +/* +** Create a new database by initializing the first page of the +** file. 
+*/ +static int newDatabase(BtShared *pBt){ + MemPage *pP1; + unsigned char *data; + int rc; + + assert( sqlite3_mutex_held(pBt->mutex) ); + if( sqlite3PagerPagecount(pBt->pPager)>0 ) return SQLITE_OK; + pP1 = pBt->pPage1; + assert( pP1!=0 ); + data = pP1->aData; + rc = sqlite3PagerWrite(pP1->pDbPage); + if( rc ) return rc; + memcpy(data, zMagicHeader, sizeof(zMagicHeader)); + assert( sizeof(zMagicHeader)==16 ); + put2byte(&data[16], pBt->pageSize); + data[18] = 1; + data[19] = 1; + data[20] = pBt->pageSize - pBt->usableSize; + data[21] = pBt->maxEmbedFrac; + data[22] = pBt->minEmbedFrac; + data[23] = pBt->minLeafFrac; + memset(&data[24], 0, 100-24); + zeroPage(pP1, PTF_INTKEY|PTF_LEAF|PTF_LEAFDATA ); + pBt->pageSizeFixed = 1; +#ifndef SQLITE_OMIT_AUTOVACUUM + assert( pBt->autoVacuum==1 || pBt->autoVacuum==0 ); + assert( pBt->incrVacuum==1 || pBt->incrVacuum==0 ); + put4byte(&data[36 + 4*4], pBt->autoVacuum); + put4byte(&data[36 + 7*4], pBt->incrVacuum); +#endif + return SQLITE_OK; +} + +/* +** Attempt to start a new transaction. A write-transaction +** is started if the second argument is nonzero, otherwise a read- +** transaction. If the second argument is 2 or more and exclusive +** transaction is started, meaning that no other process is allowed +** to access the database. A preexisting transaction may not be +** upgraded to exclusive by calling this routine a second time - the +** exclusivity flag only works for a new transaction. +** +** A write-transaction must be started before attempting any +** changes to the database. None of the following routines +** will work unless a transaction is started first: +** +** sqlite3BtreeCreateTable() +** sqlite3BtreeCreateIndex() +** sqlite3BtreeClearTable() +** sqlite3BtreeDropTable() +** sqlite3BtreeInsert() +** sqlite3BtreeDelete() +** sqlite3BtreeUpdateMeta() +** +** If an initial attempt to acquire the lock fails because of lock contention +** and the database was previously unlocked, then invoke the busy handler +** if there is one. But if there was previously a read-lock, do not +** invoke the busy handler - just return SQLITE_BUSY. SQLITE_BUSY is +** returned when there is already a read-lock in order to avoid a deadlock. +** +** Suppose there are two processes A and B. A has a read lock and B has +** a reserved lock. B tries to promote to exclusive but is blocked because +** of A's read lock. A tries to promote to reserved but is blocked by B. +** One or the other of the two processes must give way or there can be +** no progress. By returning SQLITE_BUSY and not invoking the busy callback +** when A already has a read lock, we encourage A to give up and let B +** proceed. +*/ +int sqlite3BtreeBeginTrans(Btree *p, int wrflag){ + BtShared *pBt = p->pBt; + int rc = SQLITE_OK; + + sqlite3BtreeEnter(p); + btreeIntegrity(p); + + /* If the btree is already in a write-transaction, or it + ** is already in a read-transaction and a read-transaction + ** is requested, this is a no-op. + */ + if( p->inTrans==TRANS_WRITE || (p->inTrans==TRANS_READ && !wrflag) ){ + goto trans_begun; + } + + /* Write transactions are not possible on a read-only database */ + if( pBt->readOnly && wrflag ){ + rc = SQLITE_READONLY; + goto trans_begun; + } + + /* If another database handle has already opened a write transaction + ** on this shared-btree structure and a second write transaction is + ** requested, return SQLITE_BUSY. 
+  */
+  if( pBt->inTransaction==TRANS_WRITE && wrflag ){
+    rc = SQLITE_BUSY;
+    goto trans_begun;
+  }
+
+  do {
+    if( pBt->pPage1==0 ){
+      rc = lockBtree(pBt);
+    }
+
+    if( rc==SQLITE_OK && wrflag ){
+      if( pBt->readOnly ){
+        rc = SQLITE_READONLY;
+      }else{
+        rc = sqlite3PagerBegin(pBt->pPage1->pDbPage, wrflag>1);
+        if( rc==SQLITE_OK ){
+          rc = newDatabase(pBt);
+        }
+      }
+    }
+
+    if( rc==SQLITE_OK ){
+      if( wrflag ) pBt->inStmt = 0;
+    }else{
+      unlockBtreeIfUnused(pBt);
+    }
+  }while( rc==SQLITE_BUSY && pBt->inTransaction==TRANS_NONE &&
+          sqlite3InvokeBusyHandler(pBt->pBusyHandler) );
+
+  if( rc==SQLITE_OK ){
+    if( p->inTrans==TRANS_NONE ){
+      pBt->nTransaction++;
+    }
+    p->inTrans = (wrflag?TRANS_WRITE:TRANS_READ);
+    if( p->inTrans>pBt->inTransaction ){
+      pBt->inTransaction = p->inTrans;
+    }
+  }
+
+
+trans_begun:
+  btreeIntegrity(p);
+  sqlite3BtreeLeave(p);
+  return rc;
+}
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+
+/*
+** Set the pointer-map entries for all children of page pPage. Also, if
+** pPage contains cells that point to overflow pages, set the pointer
+** map entries for the overflow pages as well.
+*/
+static int setChildPtrmaps(MemPage *pPage){
+  int i;                             /* Counter variable */
+  int nCell;                         /* Number of cells in page pPage */
+  int rc;                            /* Return code */
+  BtShared *pBt = pPage->pBt;
+  int isInitOrig = pPage->isInit;
+  Pgno pgno = pPage->pgno;
+
+  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+  rc = sqlite3BtreeInitPage(pPage, pPage->pParent);
+  if( rc!=SQLITE_OK ){
+    goto set_child_ptrmaps_out;
+  }
+  nCell = pPage->nCell;
+
+  for(i=0; i<nCell; i++){
+    u8 *pCell = findCell(pPage, i);
+
+    rc = ptrmapPutOvflPtr(pPage, pCell);
+    if( rc!=SQLITE_OK ){
+      goto set_child_ptrmaps_out;
+    }
+
+    if( !pPage->leaf ){
+      Pgno childPgno = get4byte(pCell);
+      rc = ptrmapPut(pBt, childPgno, PTRMAP_BTREE, pgno);
+      if( rc!=SQLITE_OK ) goto set_child_ptrmaps_out;
+    }
+  }
+
+  if( !pPage->leaf ){
+    Pgno childPgno = get4byte(&pPage->aData[pPage->hdrOffset+8]);
+    rc = ptrmapPut(pBt, childPgno, PTRMAP_BTREE, pgno);
+  }
+
+set_child_ptrmaps_out:
+  pPage->isInit = isInitOrig;
+  return rc;
+}
+
+/*
+** Somewhere on pPage, which is guaranteed to be a btree page, not an overflow
+** page, is a pointer to page iFrom. Modify this pointer so that it points to
+** iTo. Parameter eType describes the type of pointer to be modified, as
+** follows:
+**
+** PTRMAP_BTREE:     pPage is a btree-page. The pointer points at a child
+**                   page of pPage.
+**
+** PTRMAP_OVERFLOW1: pPage is a btree-page. The pointer points at an overflow
+**                   page pointed to by one of the cells on pPage.
+**
+** PTRMAP_OVERFLOW2: pPage is an overflow-page. The pointer points at the next
+**                   overflow page in the list.
+*/
+static int modifyPagePointer(MemPage *pPage, Pgno iFrom, Pgno iTo, u8 eType){
+  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+  if( eType==PTRMAP_OVERFLOW2 ){
+    /* The pointer is always the first 4 bytes of the page in this case. */
+    if( get4byte(pPage->aData)!=iFrom ){
+      return SQLITE_CORRUPT_BKPT;
+    }
+    put4byte(pPage->aData, iTo);
+  }else{
+    int isInitOrig = pPage->isInit;
+    int i;
+    int nCell;
+
+    sqlite3BtreeInitPage(pPage, 0);
+    nCell = pPage->nCell;
+
+    for(i=0; i<nCell; i++){
+      u8 *pCell = findCell(pPage, i);
+      if( eType==PTRMAP_OVERFLOW1 ){
+        CellInfo info;
+        sqlite3BtreeParseCellPtr(pPage, pCell, &info);
+        if( info.iOverflow ){
+          if( iFrom==get4byte(&pCell[info.iOverflow]) ){
+            put4byte(&pCell[info.iOverflow], iTo);
+            break;
+          }
+        }
+      }else{
+        if( get4byte(pCell)==iFrom ){
+          put4byte(pCell, iTo);
+          break;
+        }
+      }
+    }
+
+    if( i==nCell ){
+      if( eType!=PTRMAP_BTREE ||
+          get4byte(&pPage->aData[pPage->hdrOffset+8])!=iFrom ){
+        return SQLITE_CORRUPT_BKPT;
+      }
+      put4byte(&pPage->aData[pPage->hdrOffset+8], iTo);
+    }
+
+    pPage->isInit = isInitOrig;
+  }
+  return SQLITE_OK;
+}
+
+
+/*
+** Move the open database page pDbPage to location iFreePage in the
+** database. The pDbPage reference remains valid.
+*/ +static int relocatePage( + BtShared *pBt, /* Btree */ + MemPage *pDbPage, /* Open page to move */ + u8 eType, /* Pointer map 'type' entry for pDbPage */ + Pgno iPtrPage, /* Pointer map 'page-no' entry for pDbPage */ + Pgno iFreePage /* The location to move pDbPage to */ +){ + MemPage *pPtrPage; /* The page that contains a pointer to pDbPage */ + Pgno iDbPage = pDbPage->pgno; + Pager *pPager = pBt->pPager; + int rc; + + assert( eType==PTRMAP_OVERFLOW2 || eType==PTRMAP_OVERFLOW1 || + eType==PTRMAP_BTREE || eType==PTRMAP_ROOTPAGE ); + assert( sqlite3_mutex_held(pBt->mutex) ); + assert( pDbPage->pBt==pBt ); + + /* Move page iDbPage from it's current location to page number iFreePage */ + TRACE(("AUTOVACUUM: Moving %d to free page %d (ptr page %d type %d)\n", + iDbPage, iFreePage, iPtrPage, eType)); + rc = sqlite3PagerMovepage(pPager, pDbPage->pDbPage, iFreePage); + if( rc!=SQLITE_OK ){ + return rc; + } + pDbPage->pgno = iFreePage; + + /* If pDbPage was a btree-page, then it may have child pages and/or cells + ** that point to overflow pages. The pointer map entries for all these + ** pages need to be changed. + ** + ** If pDbPage is an overflow page, then the first 4 bytes may store a + ** pointer to a subsequent overflow page. If this is the case, then + ** the pointer map needs to be updated for the subsequent overflow page. + */ + if( eType==PTRMAP_BTREE || eType==PTRMAP_ROOTPAGE ){ + rc = setChildPtrmaps(pDbPage); + if( rc!=SQLITE_OK ){ + return rc; + } + }else{ + Pgno nextOvfl = get4byte(pDbPage->aData); + if( nextOvfl!=0 ){ + rc = ptrmapPut(pBt, nextOvfl, PTRMAP_OVERFLOW2, iFreePage); + if( rc!=SQLITE_OK ){ + return rc; + } + } + } + + /* Fix the database pointer on page iPtrPage that pointed at iDbPage so + ** that it points at iFreePage. Also fix the pointer map entry for + ** iPtrPage. + */ + if( eType!=PTRMAP_ROOTPAGE ){ + rc = sqlite3BtreeGetPage(pBt, iPtrPage, &pPtrPage, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + rc = sqlite3PagerWrite(pPtrPage->pDbPage); + if( rc!=SQLITE_OK ){ + releasePage(pPtrPage); + return rc; + } + rc = modifyPagePointer(pPtrPage, iDbPage, iFreePage, eType); + releasePage(pPtrPage); + if( rc==SQLITE_OK ){ + rc = ptrmapPut(pBt, iFreePage, eType, iPtrPage); + } + } + return rc; +} + +/* Forward declaration required by incrVacuumStep(). */ +static int allocateBtreePage(BtShared *, MemPage **, Pgno *, Pgno, u8); + +/* +** Perform a single step of an incremental-vacuum. If successful, +** return SQLITE_OK. If there is no work to do (and therefore no +** point in calling this function again), return SQLITE_DONE. +** +** More specificly, this function attempts to re-organize the +** database so that the last page of the file currently in use +** is no longer in use. +** +** If the nFin parameter is non-zero, the implementation assumes +** that the caller will keep calling incrVacuumStep() until +** it returns SQLITE_DONE or an error, and that nFin is the +** number of pages the database file will contain after this +** process is complete. 
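+**
+** As a rough illustration: if the file is currently 103 pages long and
+** nFin is 100, three successful steps would move the contents of pages
+** 103, 102 and 101 down into free slots at or below page 100 (pointer-map
+** pages and the pending-byte page are simply skipped), after which the
+** file can be truncated to 100 pages.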
+*/
+static int incrVacuumStep(BtShared *pBt, Pgno nFin){
+  Pgno iLastPg;             /* Last page in the database */
+  Pgno nFreeList;           /* Number of pages still on the free-list */
+
+  assert( sqlite3_mutex_held(pBt->mutex) );
+  iLastPg = pBt->nTrunc;
+  if( iLastPg==0 ){
+    iLastPg = sqlite3PagerPagecount(pBt->pPager);
+  }
+
+  if( !PTRMAP_ISPAGE(pBt, iLastPg) && iLastPg!=PENDING_BYTE_PAGE(pBt) ){
+    int rc;
+    u8 eType;
+    Pgno iPtrPage;
+
+    nFreeList = get4byte(&pBt->pPage1->aData[36]);
+    if( nFreeList==0 || nFin==iLastPg ){
+      return SQLITE_DONE;
+    }
+
+    rc = ptrmapGet(pBt, iLastPg, &eType, &iPtrPage);
+    if( rc!=SQLITE_OK ){
+      return rc;
+    }
+    if( eType==PTRMAP_ROOTPAGE ){
+      return SQLITE_CORRUPT_BKPT;
+    }
+
+    if( eType==PTRMAP_FREEPAGE ){
+      if( nFin==0 ){
+        /* Remove the page from the files free-list. This is not required
+        ** if nFin is non-zero. In that case, the free-list will be
+        ** truncated to zero after this function returns, so it doesn't
+        ** matter if it still contains some garbage entries.
+        */
+        Pgno iFreePg;
+        MemPage *pFreePg;
+        rc = allocateBtreePage(pBt, &pFreePg, &iFreePg, iLastPg, 1);
+        if( rc!=SQLITE_OK ){
+          return rc;
+        }
+        assert( iFreePg==iLastPg );
+        releasePage(pFreePg);
+      }
+    } else {
+      Pgno iFreePg;             /* Index of free page to move pLastPg to */
+      MemPage *pLastPg;
+
+      rc = sqlite3BtreeGetPage(pBt, iLastPg, &pLastPg, 0);
+      if( rc!=SQLITE_OK ){
+        return rc;
+      }
+
+      /* If nFin is zero, this loop runs exactly once and page pLastPg
+      ** is swapped with the first free page pulled off the free list.
+      **
+      ** On the other hand, if nFin is greater than zero, then keep
+      ** looping until a free-page located within the first nFin pages
+      ** of the file is found.
+      */
+      do {
+        MemPage *pFreePg;
+        rc = allocateBtreePage(pBt, &pFreePg, &iFreePg, 0, 0);
+        if( rc!=SQLITE_OK ){
+          releasePage(pLastPg);
+          return rc;
+        }
+        releasePage(pFreePg);
+      }while( nFin!=0 && iFreePg>nFin );
+      assert( iFreePg<iLastPg );
+
+      rc = sqlite3PagerWrite(pLastPg->pDbPage);
+      if( rc!=SQLITE_OK ){
+        return rc;
+      }
+      rc = relocatePage(pBt, pLastPg, eType, iPtrPage, iFreePg);
+      releasePage(pLastPg);
+      if( rc!=SQLITE_OK ){
+        return rc;
+      }
+    }
+  }
+
+  pBt->nTrunc = iLastPg - 1;
+  while( pBt->nTrunc==PENDING_BYTE_PAGE(pBt)||PTRMAP_ISPAGE(pBt, pBt->nTrunc) ){
+    pBt->nTrunc--;
+  }
+  return SQLITE_OK;
+}
+
+/*
+** A write-transaction must be opened before calling this function.
+** It performs a single unit of work towards an incremental vacuum.
+**
+** If the incremental vacuum is finished after this function has run,
+** SQLITE_DONE is returned. If it is not finished, but no error occurred,
+** SQLITE_OK is returned. Otherwise an SQLite error code.
+*/
+int sqlite3BtreeIncrVacuum(Btree *p){
+  int rc;
+  BtShared *pBt = p->pBt;
+
+  sqlite3BtreeEnter(p);
+  assert( pBt->inTransaction==TRANS_WRITE && p->inTrans==TRANS_WRITE );
+  if( !pBt->autoVacuum ){
+    rc = SQLITE_DONE;
+  }else{
+    invalidateAllOverflowCache(pBt);
+    rc = incrVacuumStep(pBt, 0);
+  }
+  sqlite3BtreeLeave(p);
+  return rc;
+}
+
+/*
+** This routine is called prior to sqlite3PagerCommit when a transaction
+** is committed for an auto-vacuum database.
+**
+** If SQLITE_OK is returned, then *pnTrunc is set to the number of pages
+** the database file should be truncated to during the commit process.
+** i.e. the database has been reorganized so that only the first *pnTrunc
+** pages are in use.
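+**
+** For a full (non-incremental) auto-vacuum the target size is, roughly,
+** the current page count minus the pages on the free-list minus the
+** pointer-map pages made unnecessary once those free pages are gone;
+** incrVacuumStep() is then called repeatedly until the file has shrunk
+** to that size.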
+*/ +static int autoVacuumCommit(BtShared *pBt, Pgno *pnTrunc){ + int rc = SQLITE_OK; + Pager *pPager = pBt->pPager; +#ifndef NDEBUG + int nRef = sqlite3PagerRefcount(pPager); +#endif + + assert( sqlite3_mutex_held(pBt->mutex) ); + invalidateAllOverflowCache(pBt); + assert(pBt->autoVacuum); + if( !pBt->incrVacuum ){ + Pgno nFin = 0; + + if( pBt->nTrunc==0 ){ + Pgno nFree; + Pgno nPtrmap; + const int pgsz = pBt->pageSize; + Pgno nOrig = sqlite3PagerPagecount(pBt->pPager); + + if( PTRMAP_ISPAGE(pBt, nOrig) ){ + return SQLITE_CORRUPT_BKPT; + } + if( nOrig==PENDING_BYTE_PAGE(pBt) ){ + nOrig--; + } + nFree = get4byte(&pBt->pPage1->aData[36]); + nPtrmap = (nFree-nOrig+PTRMAP_PAGENO(pBt, nOrig)+pgsz/5)/(pgsz/5); + nFin = nOrig - nFree - nPtrmap; + if( nOrig>PENDING_BYTE_PAGE(pBt) && nFin<=PENDING_BYTE_PAGE(pBt) ){ + nFin--; + } + while( PTRMAP_ISPAGE(pBt, nFin) || nFin==PENDING_BYTE_PAGE(pBt) ){ + nFin--; + } + } + + while( rc==SQLITE_OK ){ + rc = incrVacuumStep(pBt, nFin); + } + if( rc==SQLITE_DONE ){ + assert(nFin==0 || pBt->nTrunc==0 || nFin<=pBt->nTrunc); + rc = SQLITE_OK; + if( pBt->nTrunc ){ + rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); + put4byte(&pBt->pPage1->aData[32], 0); + put4byte(&pBt->pPage1->aData[36], 0); + pBt->nTrunc = nFin; + } + } + if( rc!=SQLITE_OK ){ + sqlite3PagerRollback(pPager); + } + } + + if( rc==SQLITE_OK ){ + *pnTrunc = pBt->nTrunc; + pBt->nTrunc = 0; + } + assert( nRef==sqlite3PagerRefcount(pPager) ); + return rc; +} + +#endif + +/* +** This routine does the first phase of a two-phase commit. This routine +** causes a rollback journal to be created (if it does not already exist) +** and populated with enough information so that if a power loss occurs +** the database can be restored to its original state by playing back +** the journal. Then the contents of the journal are flushed out to +** the disk. After the journal is safely on oxide, the changes to the +** database are written into the database file and flushed to oxide. +** At the end of this call, the rollback journal still exists on the +** disk and we are still holding all locks, so the transaction has not +** committed. See sqlite3BtreeCommit() for the second phase of the +** commit process. +** +** This call is a no-op if no write-transaction is currently active on pBt. +** +** Otherwise, sync the database file for the btree pBt. zMaster points to +** the name of a master journal file that should be written into the +** individual journal file, or is NULL, indicating no master journal file +** (single database transaction). +** +** When this is called, the master journal should already have been +** created, populated with this journal pointer and synced to disk. +** +** Once this is routine has returned, the only thing required to commit +** the write-transaction for this database file is to delete the journal. +*/ +int sqlite3BtreeCommitPhaseOne(Btree *p, const char *zMaster){ + int rc = SQLITE_OK; + if( p->inTrans==TRANS_WRITE ){ + BtShared *pBt = p->pBt; + Pgno nTrunc = 0; + sqlite3BtreeEnter(p); +#ifndef SQLITE_OMIT_AUTOVACUUM + if( pBt->autoVacuum ){ + rc = autoVacuumCommit(pBt, &nTrunc); + if( rc!=SQLITE_OK ){ + sqlite3BtreeLeave(p); + return rc; + } + } +#endif + rc = sqlite3PagerCommitPhaseOne(pBt->pPager, zMaster, nTrunc); + sqlite3BtreeLeave(p); + } + return rc; +} + +/* +** Commit the transaction currently in progress. +** +** This routine implements the second phase of a 2-phase commit. 
The +** sqlite3BtreeSync() routine does the first phase and should be invoked +** prior to calling this routine. The sqlite3BtreeSync() routine did +** all the work of writing information out to disk and flushing the +** contents so that they are written onto the disk platter. All this +** routine has to do is delete or truncate the rollback journal +** (which causes the transaction to commit) and drop locks. +** +** This will release the write lock on the database file. If there +** are no active cursors, it also releases the read lock. +*/ +int sqlite3BtreeCommitPhaseTwo(Btree *p){ + BtShared *pBt = p->pBt; + + sqlite3BtreeEnter(p); + btreeIntegrity(p); + + /* If the handle has a write-transaction open, commit the shared-btrees + ** transaction and set the shared state to TRANS_READ. + */ + if( p->inTrans==TRANS_WRITE ){ + int rc; + assert( pBt->inTransaction==TRANS_WRITE ); + assert( pBt->nTransaction>0 ); + rc = sqlite3PagerCommitPhaseTwo(pBt->pPager); + if( rc!=SQLITE_OK ){ + sqlite3BtreeLeave(p); + return rc; + } + pBt->inTransaction = TRANS_READ; + pBt->inStmt = 0; + } + unlockAllTables(p); + + /* If the handle has any kind of transaction open, decrement the transaction + ** count of the shared btree. If the transaction count reaches 0, set + ** the shared state to TRANS_NONE. The unlockBtreeIfUnused() call below + ** will unlock the pager. + */ + if( p->inTrans!=TRANS_NONE ){ + pBt->nTransaction--; + if( 0==pBt->nTransaction ){ + pBt->inTransaction = TRANS_NONE; + } + } + + /* Set the handles current transaction state to TRANS_NONE and unlock + ** the pager if this call closed the only read or write transaction. + */ + p->inTrans = TRANS_NONE; + unlockBtreeIfUnused(pBt); + + btreeIntegrity(p); + sqlite3BtreeLeave(p); + return SQLITE_OK; +} + +/* +** Do both phases of a commit. +*/ +int sqlite3BtreeCommit(Btree *p){ + int rc; + sqlite3BtreeEnter(p); + rc = sqlite3BtreeCommitPhaseOne(p, 0); + if( rc==SQLITE_OK ){ + rc = sqlite3BtreeCommitPhaseTwo(p); + } + sqlite3BtreeLeave(p); + return rc; +} + +#ifndef NDEBUG +/* +** Return the number of write-cursors open on this handle. This is for use +** in assert() expressions, so it is only compiled if NDEBUG is not +** defined. +** +** For the purposes of this routine, a write-cursor is any cursor that +** is capable of writing to the databse. That means the cursor was +** originally opened for writing and the cursor has not be disabled +** by having its state changed to CURSOR_FAULT. +*/ +static int countWriteCursors(BtShared *pBt){ + BtCursor *pCur; + int r = 0; + for(pCur=pBt->pCursor; pCur; pCur=pCur->pNext){ + if( pCur->wrFlag && pCur->eState!=CURSOR_FAULT ) r++; + } + return r; +} +#endif + +/* +** This routine sets the state to CURSOR_FAULT and the error +** code to errCode for every cursor on BtShared that pBtree +** references. +** +** Every cursor is tripped, including cursors that belong +** to other database connections that happen to be sharing +** the cache with pBtree. +** +** This routine gets called when a rollback occurs. +** All cursors using the same cache must be tripped +** to prevent them from trying to use the btree after +** the rollback. The rollback may have deleted tables +** or moved root pages, so it is not sufficient to +** save the state of the cursor. The cursor must be +** invalidated. 
+*/ +void sqlite3BtreeTripAllCursors(Btree *pBtree, int errCode){ + BtCursor *p; + sqlite3BtreeEnter(pBtree); + for(p=pBtree->pBt->pCursor; p; p=p->pNext){ + clearCursorPosition(p); + p->eState = CURSOR_FAULT; + p->skip = errCode; + } + sqlite3BtreeLeave(pBtree); +} + +/* +** Rollback the transaction in progress. All cursors will be +** invalided by this operation. Any attempt to use a cursor +** that was open at the beginning of this operation will result +** in an error. +** +** This will release the write lock on the database file. If there +** are no active cursors, it also releases the read lock. +*/ +int sqlite3BtreeRollback(Btree *p){ + int rc; + BtShared *pBt = p->pBt; + MemPage *pPage1; + + sqlite3BtreeEnter(p); + rc = saveAllCursors(pBt, 0, 0); +#ifndef SQLITE_OMIT_SHARED_CACHE + if( rc!=SQLITE_OK ){ + /* This is a horrible situation. An IO or malloc() error occured whilst + ** trying to save cursor positions. If this is an automatic rollback (as + ** the result of a constraint, malloc() failure or IO error) then + ** the cache may be internally inconsistent (not contain valid trees) so + ** we cannot simply return the error to the caller. Instead, abort + ** all queries that may be using any of the cursors that failed to save. + */ + sqlite3BtreeTripAllCursors(p, rc); + } +#endif + btreeIntegrity(p); + unlockAllTables(p); + + if( p->inTrans==TRANS_WRITE ){ + int rc2; + +#ifndef SQLITE_OMIT_AUTOVACUUM + pBt->nTrunc = 0; +#endif + + assert( TRANS_WRITE==pBt->inTransaction ); + rc2 = sqlite3PagerRollback(pBt->pPager); + if( rc2!=SQLITE_OK ){ + rc = rc2; + } + + /* The rollback may have destroyed the pPage1->aData value. So + ** call sqlite3BtreeGetPage() on page 1 again to make + ** sure pPage1->aData is set correctly. */ + if( sqlite3BtreeGetPage(pBt, 1, &pPage1, 0)==SQLITE_OK ){ + releasePage(pPage1); + } + assert( countWriteCursors(pBt)==0 ); + pBt->inTransaction = TRANS_READ; + } + + if( p->inTrans!=TRANS_NONE ){ + assert( pBt->nTransaction>0 ); + pBt->nTransaction--; + if( 0==pBt->nTransaction ){ + pBt->inTransaction = TRANS_NONE; + } + } + + p->inTrans = TRANS_NONE; + pBt->inStmt = 0; + unlockBtreeIfUnused(pBt); + + btreeIntegrity(p); + sqlite3BtreeLeave(p); + return rc; +} + +/* +** Start a statement subtransaction. The subtransaction can +** can be rolled back independently of the main transaction. +** You must start a transaction before starting a subtransaction. +** The subtransaction is ended automatically if the main transaction +** commits or rolls back. +** +** Only one subtransaction may be active at a time. It is an error to try +** to start a new subtransaction if another subtransaction is already active. +** +** Statement subtransactions are used around individual SQL statements +** that are contained within a BEGIN...COMMIT block. If a constraint +** error occurs within the statement, the effect of that one statement +** can be rolled back without having to rollback the entire transaction. +*/ +int sqlite3BtreeBeginStmt(Btree *p){ + int rc; + BtShared *pBt = p->pBt; + sqlite3BtreeEnter(p); + if( (p->inTrans!=TRANS_WRITE) || pBt->inStmt ){ + rc = pBt->readOnly ? SQLITE_READONLY : SQLITE_ERROR; + }else{ + assert( pBt->inTransaction==TRANS_WRITE ); + rc = pBt->readOnly ? SQLITE_OK : sqlite3PagerStmtBegin(pBt->pPager); + pBt->inStmt = 1; + } + sqlite3BtreeLeave(p); + return rc; +} + + +/* +** Commit the statment subtransaction currently in progress. If no +** subtransaction is active, this is a no-op. 
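+**
+** An illustrative usage pattern around a single statement inside a larger
+** transaction (constraintFailed is a hypothetical flag owned by the caller):
+**
+**     rc = sqlite3BtreeBeginStmt(p);
+**     ... perform the writes for one SQL statement ...
+**     if( rc==SQLITE_OK && !constraintFailed ){
+**       rc = sqlite3BtreeCommitStmt(p);
+**     }else{
+**       rc = sqlite3BtreeRollbackStmt(p);
+**     }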
+*/
+int sqlite3BtreeCommitStmt(Btree *p){
+  int rc;
+  BtShared *pBt = p->pBt;
+  sqlite3BtreeEnter(p);
+  if( pBt->inStmt && !pBt->readOnly ){
+    rc = sqlite3PagerStmtCommit(pBt->pPager);
+  }else{
+    rc = SQLITE_OK;
+  }
+  pBt->inStmt = 0;
+  sqlite3BtreeLeave(p);
+  return rc;
+}
+
+/*
+** Rollback the active statement subtransaction. If no subtransaction
+** is active this routine is a no-op.
+**
+** All cursors will be invalidated by this operation. Any attempt
+** to use a cursor that was open at the beginning of this operation
+** will result in an error.
+*/
+int sqlite3BtreeRollbackStmt(Btree *p){
+  int rc = SQLITE_OK;
+  BtShared *pBt = p->pBt;
+  sqlite3BtreeEnter(p);
+  if( pBt->inStmt && !pBt->readOnly ){
+    rc = sqlite3PagerStmtRollback(pBt->pPager);
+    assert( countWriteCursors(pBt)==0 );
+    pBt->inStmt = 0;
+  }
+  sqlite3BtreeLeave(p);
+  return rc;
+}
+
+/*
+** Default key comparison function to be used if no comparison function
+** is specified on the sqlite3BtreeCursor() call.
+*/
+static int dfltCompare(
+  void *NotUsed,          /* User data is not used */
+  int n1, const void *p1, /* First key to compare */
+  int n2, const void *p2  /* Second key to compare */
+){
+  int c;
+  c = memcmp(p1, p2, n1<n2 ? n1 : n2);
+  if( c==0 ){
+    c = n1 - n2;
+  }
+  return c;
+}
+
+/*
+** Create a new cursor for the BTree whose root is on page iTable.
+** The act of acquiring a cursor gets a read lock on the database file.
+**
+** If wrFlag==0, then the cursor can only be used for reading.
+** If wrFlag==1, then the cursor can be used for reading or for writing,
+** provided the database is writable and there are no conflicting
+** read-locks on the same table.
+**
+** If the comparison function xCmp is NULL, the default comparison
+** function dfltCompare() above is used. The comparison function is
+** always ignored for INTKEY tables.
+*/
+static int btreeCursor(
+  Btree *p,                                   /* The btree */
+  int iTable,                                 /* Root page of table to open */
+  int wrFlag,                                 /* 1 to write. 0 read-only */
+  int (*xCmp)(void*,int,const void*,int,const void*), /* Key Comparison func */
+  void *pArg,                                 /* First arg to xCompare() */
+  BtCursor **ppCur                            /* Write new cursor here */
+){
+  int rc;
+  BtCursor *pCur;
+  BtShared *pBt = p->pBt;
+
+  assert( sqlite3BtreeHoldsMutex(p) );
+  *ppCur = 0;
+  if( wrFlag ){
+    if( pBt->readOnly ){
+      return SQLITE_READONLY;
+    }
+    if( checkReadLocks(p, iTable, 0) ){
+      return SQLITE_LOCKED;
+    }
+  }
+
+  if( pBt->pPage1==0 ){
+    rc = lockBtreeWithRetry(p);
+    if( rc!=SQLITE_OK ){
+      return rc;
+    }
+    if( pBt->readOnly && wrFlag ){
+      return SQLITE_READONLY;
+    }
+  }
+  pCur = sqlite3MallocZero( sizeof(*pCur) );
+  if( pCur==0 ){
+    rc = SQLITE_NOMEM;
+    goto create_cursor_exception;
+  }
+  pCur->pgnoRoot = (Pgno)iTable;
+  if( iTable==1 && sqlite3PagerPagecount(pBt->pPager)==0 ){
+    rc = SQLITE_EMPTY;
+    goto create_cursor_exception;
+  }
+  rc = getAndInitPage(pBt, pCur->pgnoRoot, &pCur->pPage, 0);
+  if( rc!=SQLITE_OK ){
+    goto create_cursor_exception;
+  }
+
+  /* Now that no other errors can occur, finish filling in the BtCursor
+  ** variables, link the cursor into the BtShared list and set *ppCur (the
+  ** output argument to this function).
+  */
+  pCur->xCompare = xCmp ? xCmp : dfltCompare;
+  pCur->pArg = pArg;
+  pCur->pBtree = p;
+  pCur->pBt = pBt;
+  pCur->wrFlag = wrFlag;
+  pCur->pNext = pBt->pCursor;
+  if( pCur->pNext ){
+    pCur->pNext->pPrev = pCur;
+  }
+  pBt->pCursor = pCur;
+  pCur->eState = CURSOR_INVALID;
+  *ppCur = pCur;
+
+  return SQLITE_OK;
+
+create_cursor_exception:
+  if( pCur ){
+    releasePage(pCur->pPage);
+    sqlite3_free(pCur);
+  }
+  unlockBtreeIfUnused(pBt);
+  return rc;
+}
+int sqlite3BtreeCursor(
+  Btree *p,                                   /* The btree */
+  int iTable,                                 /* Root page of table to open */
+  int wrFlag,                                 /* 1 to write. 0 read-only */
+  int (*xCmp)(void*,int,const void*,int,const void*), /* Key Comparison func */
+  void *pArg,                                 /* First arg to xCompare() */
+  BtCursor **ppCur                            /* Write new cursor here */
+){
+  int rc;
+  sqlite3BtreeEnter(p);
+  rc = btreeCursor(p, iTable, wrFlag, xCmp, pArg, ppCur);
+  sqlite3BtreeLeave(p);
+  return rc;
+}
+
+
+/*
+** Close a cursor. The read lock on the database file is released
+** when the last cursor is closed.
+*/ +int sqlite3BtreeCloseCursor(BtCursor *pCur){ + BtShared *pBt = pCur->pBt; + Btree *pBtree = pCur->pBtree; + + sqlite3BtreeEnter(pBtree); + clearCursorPosition(pCur); + if( pCur->pPrev ){ + pCur->pPrev->pNext = pCur->pNext; + }else{ + pBt->pCursor = pCur->pNext; + } + if( pCur->pNext ){ + pCur->pNext->pPrev = pCur->pPrev; + } + releasePage(pCur->pPage); + unlockBtreeIfUnused(pBt); + invalidateOverflowCache(pCur); + sqlite3_free(pCur); + sqlite3BtreeLeave(pBtree); + return SQLITE_OK; +} + +/* +** Make a temporary cursor by filling in the fields of pTempCur. +** The temporary cursor is not on the cursor list for the Btree. +*/ +void sqlite3BtreeGetTempCursor(BtCursor *pCur, BtCursor *pTempCur){ + assert( cursorHoldsMutex(pCur) ); + memcpy(pTempCur, pCur, sizeof(*pCur)); + pTempCur->pNext = 0; + pTempCur->pPrev = 0; + if( pTempCur->pPage ){ + sqlite3PagerRef(pTempCur->pPage->pDbPage); + } +} + +/* +** Delete a temporary cursor such as was made by the CreateTemporaryCursor() +** function above. +*/ +void sqlite3BtreeReleaseTempCursor(BtCursor *pCur){ + assert( cursorHoldsMutex(pCur) ); + if( pCur->pPage ){ + sqlite3PagerUnref(pCur->pPage->pDbPage); + } +} + +/* +** Make sure the BtCursor* given in the argument has a valid +** BtCursor.info structure. If it is not already valid, call +** sqlite3BtreeParseCell() to fill it in. +** +** BtCursor.info is a cache of the information in the current cell. +** Using this cache reduces the number of calls to sqlite3BtreeParseCell(). +** +** 2007-06-25: There is a bug in some versions of MSVC that cause the +** compiler to crash when getCellInfo() is implemented as a macro. +** But there is a measureable speed advantage to using the macro on gcc +** (when less compiler optimizations like -Os or -O0 are used and the +** compiler is not doing agressive inlining.) So we use a real function +** for MSVC and a macro for everything else. Ticket #2457. +*/ +#ifndef NDEBUG + static void assertCellInfo(BtCursor *pCur){ + CellInfo info; + memset(&info, 0, sizeof(info)); + sqlite3BtreeParseCell(pCur->pPage, pCur->idx, &info); + assert( memcmp(&info, &pCur->info, sizeof(info))==0 ); + } +#else + #define assertCellInfo(x) +#endif +#ifdef _MSC_VER + /* Use a real function in MSVC to work around bugs in that compiler. */ + static void getCellInfo(BtCursor *pCur){ + if( pCur->info.nSize==0 ){ + sqlite3BtreeParseCell(pCur->pPage, pCur->idx, &pCur->info); + }else{ + assertCellInfo(pCur); + } + } +#else /* if not _MSC_VER */ + /* Use a macro in all other compilers so that the function is inlined */ +#define getCellInfo(pCur) \ + if( pCur->info.nSize==0 ){ \ + sqlite3BtreeParseCell(pCur->pPage, pCur->idx, &pCur->info); \ + }else{ \ + assertCellInfo(pCur); \ + } +#endif /* _MSC_VER */ + +/* +** Set *pSize to the size of the buffer needed to hold the value of +** the key for the current entry. If the cursor is not pointing +** to a valid entry, *pSize is set to 0. +** +** For a table with the INTKEY flag set, this routine returns the key +** itself, not the number of bytes in the key. +*/ +int sqlite3BtreeKeySize(BtCursor *pCur, i64 *pSize){ + int rc; + + assert( cursorHoldsMutex(pCur) ); + rc = restoreOrClearCursorPosition(pCur); + if( rc==SQLITE_OK ){ + assert( pCur->eState==CURSOR_INVALID || pCur->eState==CURSOR_VALID ); + if( pCur->eState==CURSOR_INVALID ){ + *pSize = 0; + }else{ + getCellInfo(pCur); + *pSize = pCur->info.nKey; + } + } + return rc; +} + +/* +** Set *pSize to the number of bytes of data in the entry the +** cursor currently points to. 
Always return SQLITE_OK. +** Failure is not possible. If the cursor is not currently +** pointing to an entry (which can happen, for example, if +** the database is empty) then *pSize is set to 0. +*/ +int sqlite3BtreeDataSize(BtCursor *pCur, u32 *pSize){ + int rc; + + assert( cursorHoldsMutex(pCur) ); + rc = restoreOrClearCursorPosition(pCur); + if( rc==SQLITE_OK ){ + assert( pCur->eState==CURSOR_INVALID || pCur->eState==CURSOR_VALID ); + if( pCur->eState==CURSOR_INVALID ){ + /* Not pointing at a valid entry - set *pSize to 0. */ + *pSize = 0; + }else{ + getCellInfo(pCur); + *pSize = pCur->info.nData; + } + } + return rc; +} + +/* +** Given the page number of an overflow page in the database (parameter +** ovfl), this function finds the page number of the next page in the +** linked list of overflow pages. If possible, it uses the auto-vacuum +** pointer-map data instead of reading the content of page ovfl to do so. +** +** If an error occurs an SQLite error code is returned. Otherwise: +** +** Unless pPgnoNext is NULL, the page number of the next overflow +** page in the linked list is written to *pPgnoNext. If page ovfl +** is the last page in it's linked list, *pPgnoNext is set to zero. +** +** If ppPage is not NULL, *ppPage is set to the MemPage* handle +** for page ovfl. The underlying pager page may have been requested +** with the noContent flag set, so the page data accessable via +** this handle may not be trusted. +*/ +static int getOverflowPage( + BtShared *pBt, + Pgno ovfl, /* Overflow page */ + MemPage **ppPage, /* OUT: MemPage handle */ + Pgno *pPgnoNext /* OUT: Next overflow page number */ +){ + Pgno next = 0; + int rc; + + assert( sqlite3_mutex_held(pBt->mutex) ); + /* One of these must not be NULL. Otherwise, why call this function? */ + assert(ppPage || pPgnoNext); + + /* If pPgnoNext is NULL, then this function is being called to obtain + ** a MemPage* reference only. No page-data is required in this case. + */ + if( !pPgnoNext ){ + return sqlite3BtreeGetPage(pBt, ovfl, ppPage, 1); + } + +#ifndef SQLITE_OMIT_AUTOVACUUM + /* Try to find the next page in the overflow list using the + ** autovacuum pointer-map pages. Guess that the next page in + ** the overflow list is page number (ovfl+1). If that guess turns + ** out to be wrong, fall back to loading the data of page + ** number ovfl to determine the next page number. + */ + if( pBt->autoVacuum ){ + Pgno pgno; + Pgno iGuess = ovfl+1; + u8 eType; + + while( PTRMAP_ISPAGE(pBt, iGuess) || iGuess==PENDING_BYTE_PAGE(pBt) ){ + iGuess++; + } + + if( iGuess<=sqlite3PagerPagecount(pBt->pPager) ){ + rc = ptrmapGet(pBt, iGuess, &eType, &pgno); + if( rc!=SQLITE_OK ){ + return rc; + } + if( eType==PTRMAP_OVERFLOW2 && pgno==ovfl ){ + next = iGuess; + } + } + } +#endif + + if( next==0 || ppPage ){ + MemPage *pPage = 0; + + rc = sqlite3BtreeGetPage(pBt, ovfl, &pPage, next!=0); + assert(rc==SQLITE_OK || pPage==0); + if( next==0 && rc==SQLITE_OK ){ + next = get4byte(pPage->aData); + } + + if( ppPage ){ + *ppPage = pPage; + }else{ + releasePage(pPage); + } + } + *pPgnoNext = next; + + return rc; +} + +/* +** Copy data from a buffer to a page, or from a page to a buffer. +** +** pPayload is a pointer to data stored on database page pDbPage. +** If argument eOp is false, then nByte bytes of data are copied +** from pPayload to the buffer pointed at by pBuf. If eOp is true, +** then sqlite3PagerWrite() is called on pDbPage and nByte bytes +** of data are copied from the buffer pBuf to pPayload. 
+** +** SQLITE_OK is returned on success, otherwise an error code. +*/ +static int copyPayload( + void *pPayload, /* Pointer to page data */ + void *pBuf, /* Pointer to buffer */ + int nByte, /* Number of bytes to copy */ + int eOp, /* 0 -> copy from page, 1 -> copy to page */ + DbPage *pDbPage /* Page containing pPayload */ +){ + if( eOp ){ + /* Copy data from buffer to page (a write operation) */ + int rc = sqlite3PagerWrite(pDbPage); + if( rc!=SQLITE_OK ){ + return rc; + } + memcpy(pPayload, pBuf, nByte); + }else{ + /* Copy data from page to buffer (a read operation) */ + memcpy(pBuf, pPayload, nByte); + } + return SQLITE_OK; +} + +/* +** This function is used to read or overwrite payload information +** for the entry that the pCur cursor is pointing to. If the eOp +** parameter is 0, this is a read operation (data copied into +** buffer pBuf). If it is non-zero, a write (data copied from +** buffer pBuf). +** +** A total of "amt" bytes are read or written beginning at "offset". +** Data is read to or from the buffer pBuf. +** +** This routine does not make a distinction between key and data. +** It just reads or writes bytes from the payload area. Data might +** appear on the main page or be scattered out on multiple overflow +** pages. +** +** If the BtCursor.isIncrblobHandle flag is set, and the current +** cursor entry uses one or more overflow pages, this function +** allocates space for and lazily popluates the overflow page-list +** cache array (BtCursor.aOverflow). Subsequent calls use this +** cache to make seeking to the supplied offset more efficient. +** +** Once an overflow page-list cache has been allocated, it may be +** invalidated if some other cursor writes to the same table, or if +** the cursor is moved to a different row. Additionally, in auto-vacuum +** mode, the following events may invalidate an overflow page-list cache. +** +** * An incremental vacuum, +** * A commit in auto_vacuum="full" mode, +** * Creating a table (may require moving an overflow page). +*/ +static int accessPayload( + BtCursor *pCur, /* Cursor pointing to entry to read from */ + int offset, /* Begin reading this far into payload */ + int amt, /* Read this many bytes */ + unsigned char *pBuf, /* Write the bytes into this buffer */ + int skipKey, /* offset begins at data if this is true */ + int eOp /* zero to read. non-zero to write. */ +){ + unsigned char *aPayload; + int rc = SQLITE_OK; + u32 nKey; + int iIdx = 0; + MemPage *pPage = pCur->pPage; /* Btree page of current cursor entry */ + BtShared *pBt = pCur->pBt; /* Btree this cursor belongs to */ + + assert( pPage ); + assert( pCur->eState==CURSOR_VALID ); + assert( pCur->idx>=0 && pCur->idxnCell ); + assert( offset>=0 ); + assert( cursorHoldsMutex(pCur) ); + + getCellInfo(pCur); + aPayload = pCur->info.pCell + pCur->info.nHeader; + nKey = (pPage->intKey ? 0 : pCur->info.nKey); + + if( skipKey ){ + offset += nKey; + } + if( offset+amt > nKey+pCur->info.nData ){ + /* Trying to read or write past the end of the data is an error */ + return SQLITE_ERROR; + } + + /* Check if data must be read/written to/from the btree page itself. 
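+  ** For example, with pCur->info.nLocal==100, offset==120 and amt==30,
+  ** nothing is copied here; offset becomes 20 and the 30 bytes are
+  ** transferred starting 20 bytes into the content area of the first
+  ** overflow page (after its 4-byte next-page pointer).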
*/ + if( offsetinfo.nLocal ){ + int a = amt; + if( a+offset>pCur->info.nLocal ){ + a = pCur->info.nLocal - offset; + } + rc = copyPayload(&aPayload[offset], pBuf, a, eOp, pPage->pDbPage); + offset = 0; + pBuf += a; + amt -= a; + }else{ + offset -= pCur->info.nLocal; + } + + if( rc==SQLITE_OK && amt>0 ){ + const int ovflSize = pBt->usableSize - 4; /* Bytes content per ovfl page */ + Pgno nextPage; + + nextPage = get4byte(&aPayload[pCur->info.nLocal]); + +#ifndef SQLITE_OMIT_INCRBLOB + /* If the isIncrblobHandle flag is set and the BtCursor.aOverflow[] + ** has not been allocated, allocate it now. The array is sized at + ** one entry for each overflow page in the overflow chain. The + ** page number of the first overflow page is stored in aOverflow[0], + ** etc. A value of 0 in the aOverflow[] array means "not yet known" + ** (the cache is lazily populated). + */ + if( pCur->isIncrblobHandle && !pCur->aOverflow ){ + int nOvfl = (pCur->info.nPayload-pCur->info.nLocal+ovflSize-1)/ovflSize; + pCur->aOverflow = (Pgno *)sqlite3MallocZero(sizeof(Pgno)*nOvfl); + if( nOvfl && !pCur->aOverflow ){ + rc = SQLITE_NOMEM; + } + } + + /* If the overflow page-list cache has been allocated and the + ** entry for the first required overflow page is valid, skip + ** directly to it. + */ + if( pCur->aOverflow && pCur->aOverflow[offset/ovflSize] ){ + iIdx = (offset/ovflSize); + nextPage = pCur->aOverflow[iIdx]; + offset = (offset%ovflSize); + } +#endif + + for( ; rc==SQLITE_OK && amt>0 && nextPage; iIdx++){ + +#ifndef SQLITE_OMIT_INCRBLOB + /* If required, populate the overflow page-list cache. */ + if( pCur->aOverflow ){ + assert(!pCur->aOverflow[iIdx] || pCur->aOverflow[iIdx]==nextPage); + pCur->aOverflow[iIdx] = nextPage; + } +#endif + + if( offset>=ovflSize ){ + /* The only reason to read this page is to obtain the page + ** number for the next page in the overflow chain. The page + ** data is not required. So first try to lookup the overflow + ** page-list cache, if any, then fall back to the getOverflowPage() + ** function. + */ +#ifndef SQLITE_OMIT_INCRBLOB + if( pCur->aOverflow && pCur->aOverflow[iIdx+1] ){ + nextPage = pCur->aOverflow[iIdx+1]; + } else +#endif + rc = getOverflowPage(pBt, nextPage, 0, &nextPage); + offset -= ovflSize; + }else{ + /* Need to read this page properly. It contains some of the + ** range of data that is being read (eOp==0) or written (eOp!=0). + */ + DbPage *pDbPage; + int a = amt; + rc = sqlite3PagerGet(pBt->pPager, nextPage, &pDbPage); + if( rc==SQLITE_OK ){ + aPayload = sqlite3PagerGetData(pDbPage); + nextPage = get4byte(aPayload); + if( a + offset > ovflSize ){ + a = ovflSize - offset; + } + rc = copyPayload(&aPayload[offset+4], pBuf, a, eOp, pDbPage); + sqlite3PagerUnref(pDbPage); + offset = 0; + amt -= a; + pBuf += a; + } + } + } + } + + if( rc==SQLITE_OK && amt>0 ){ + return SQLITE_CORRUPT_BKPT; + } + return rc; +} + +/* +** Read part of the key associated with cursor pCur. Exactly +** "amt" bytes will be transfered into pBuf[]. The transfer +** begins at "offset". +** +** Return SQLITE_OK on success or an error code if anything goes +** wrong. An error is returned if "offset+amt" is larger than +** the available payload. 
+*/ +int sqlite3BtreeKey(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){ + int rc; + + assert( cursorHoldsMutex(pCur) ); + rc = restoreOrClearCursorPosition(pCur); + if( rc==SQLITE_OK ){ + assert( pCur->eState==CURSOR_VALID ); + assert( pCur->pPage!=0 ); + if( pCur->pPage->intKey ){ + return SQLITE_CORRUPT_BKPT; + } + assert( pCur->pPage->intKey==0 ); + assert( pCur->idx>=0 && pCur->idxpPage->nCell ); + rc = accessPayload(pCur, offset, amt, (unsigned char*)pBuf, 0, 0); + } + return rc; +} + +/* +** Read part of the data associated with cursor pCur. Exactly +** "amt" bytes will be transfered into pBuf[]. The transfer +** begins at "offset". +** +** Return SQLITE_OK on success or an error code if anything goes +** wrong. An error is returned if "offset+amt" is larger than +** the available payload. +*/ +int sqlite3BtreeData(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){ + int rc; + + assert( cursorHoldsMutex(pCur) ); + rc = restoreOrClearCursorPosition(pCur); + if( rc==SQLITE_OK ){ + assert( pCur->eState==CURSOR_VALID ); + assert( pCur->pPage!=0 ); + assert( pCur->idx>=0 && pCur->idxpPage->nCell ); + rc = accessPayload(pCur, offset, amt, pBuf, 1, 0); + } + return rc; +} + +/* +** Return a pointer to payload information from the entry that the +** pCur cursor is pointing to. The pointer is to the beginning of +** the key if skipKey==0 and it points to the beginning of data if +** skipKey==1. The number of bytes of available key/data is written +** into *pAmt. If *pAmt==0, then the value returned will not be +** a valid pointer. +** +** This routine is an optimization. It is common for the entire key +** and data to fit on the local page and for there to be no overflow +** pages. When that is so, this routine can be used to access the +** key and data without making a copy. If the key and/or data spills +** onto overflow pages, then accessPayload() must be used to reassembly +** the key/data and copy it into a preallocated buffer. +** +** The pointer returned by this routine looks directly into the cached +** page of the database. The data might change or move the next time +** any btree routine is called. +*/ +static const unsigned char *fetchPayload( + BtCursor *pCur, /* Cursor pointing to entry to read from */ + int *pAmt, /* Write the number of available bytes here */ + int skipKey /* read beginning at data if this is true */ +){ + unsigned char *aPayload; + MemPage *pPage; + u32 nKey; + int nLocal; + + assert( pCur!=0 && pCur->pPage!=0 ); + assert( pCur->eState==CURSOR_VALID ); + assert( cursorHoldsMutex(pCur) ); + pPage = pCur->pPage; + assert( pCur->idx>=0 && pCur->idxnCell ); + getCellInfo(pCur); + aPayload = pCur->info.pCell; + aPayload += pCur->info.nHeader; + if( pPage->intKey ){ + nKey = 0; + }else{ + nKey = pCur->info.nKey; + } + if( skipKey ){ + aPayload += nKey; + nLocal = pCur->info.nLocal - nKey; + }else{ + nLocal = pCur->info.nLocal; + if( nLocal>nKey ){ + nLocal = nKey; + } + } + *pAmt = nLocal; + return aPayload; +} + + +/* +** For the entry that cursor pCur is point to, return as +** many bytes of the key or data as are available on the local +** b-tree page. Write the number of available bytes into *pAmt. +** +** The pointer returned is ephemeral. The key/data may move +** or be destroyed on the next call to any Btree routine, +** including calls from other threads against the same cache. +** Hence, a mutex on the BtShared should be held prior to calling +** this routine. 
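+**
+** An illustrative caller that needs nByte bytes of data might try this
+** zero-copy path first and fall back to sqlite3BtreeData() (nByte and
+** zBuf are the caller's own variables):
+**
+**     int amt;
+**     const void *z = sqlite3BtreeDataFetch(pCur, &amt);
+**     if( z==0 || amt<nByte ){
+**       rc = sqlite3BtreeData(pCur, 0, nByte, zBuf);
+**     }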
+** +** These routines is used to get quick access to key and data +** in the common case where no overflow pages are used. +*/ +const void *sqlite3BtreeKeyFetch(BtCursor *pCur, int *pAmt){ + assert( cursorHoldsMutex(pCur) ); + if( pCur->eState==CURSOR_VALID ){ + return (const void*)fetchPayload(pCur, pAmt, 0); + } + return 0; +} +const void *sqlite3BtreeDataFetch(BtCursor *pCur, int *pAmt){ + assert( cursorHoldsMutex(pCur) ); + if( pCur->eState==CURSOR_VALID ){ + return (const void*)fetchPayload(pCur, pAmt, 1); + } + return 0; +} + + +/* +** Move the cursor down to a new child page. The newPgno argument is the +** page number of the child page to move to. +*/ +static int moveToChild(BtCursor *pCur, u32 newPgno){ + int rc; + MemPage *pNewPage; + MemPage *pOldPage; + BtShared *pBt = pCur->pBt; + + assert( cursorHoldsMutex(pCur) ); + assert( pCur->eState==CURSOR_VALID ); + rc = getAndInitPage(pBt, newPgno, &pNewPage, pCur->pPage); + if( rc ) return rc; + pNewPage->idxParent = pCur->idx; + pOldPage = pCur->pPage; + pOldPage->idxShift = 0; + releasePage(pOldPage); + pCur->pPage = pNewPage; + pCur->idx = 0; + pCur->info.nSize = 0; + if( pNewPage->nCell<1 ){ + return SQLITE_CORRUPT_BKPT; + } + return SQLITE_OK; +} + +/* +** Return true if the page is the virtual root of its table. +** +** The virtual root page is the root page for most tables. But +** for the table rooted on page 1, sometime the real root page +** is empty except for the right-pointer. In such cases the +** virtual root page is the page that the right-pointer of page +** 1 is pointing to. +*/ +int sqlite3BtreeIsRootPage(MemPage *pPage){ + MemPage *pParent; + + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + pParent = pPage->pParent; + if( pParent==0 ) return 1; + if( pParent->pgno>1 ) return 0; + if( get2byte(&pParent->aData[pParent->hdrOffset+3])==0 ) return 1; + return 0; +} + +/* +** Move the cursor up to the parent page. +** +** pCur->idx is set to the cell index that contains the pointer +** to the page we are coming from. If we are coming from the +** right-most child page then pCur->idx is set to one more than +** the largest cell index. 
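+**
+** For example, on a parent page holding 5 cells (indices 0..4), arriving
+** from the child pointed to by cell 2 leaves pCur->idx==2, while arriving
+** from the right-child pointer leaves pCur->idx==5.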
+*/ +void sqlite3BtreeMoveToParent(BtCursor *pCur){ + MemPage *pParent; + MemPage *pPage; + int idxParent; + + assert( cursorHoldsMutex(pCur) ); + assert( pCur->eState==CURSOR_VALID ); + pPage = pCur->pPage; + assert( pPage!=0 ); + assert( !sqlite3BtreeIsRootPage(pPage) ); + pParent = pPage->pParent; + assert( pParent!=0 ); + idxParent = pPage->idxParent; + sqlite3PagerRef(pParent->pDbPage); + releasePage(pPage); + pCur->pPage = pParent; + pCur->info.nSize = 0; + assert( pParent->idxShift==0 ); + pCur->idx = idxParent; +} + +/* +** Move the cursor to the root page +*/ +static int moveToRoot(BtCursor *pCur){ + MemPage *pRoot; + int rc = SQLITE_OK; + Btree *p = pCur->pBtree; + BtShared *pBt = p->pBt; + + assert( cursorHoldsMutex(pCur) ); + assert( CURSOR_INVALID < CURSOR_REQUIRESEEK ); + assert( CURSOR_VALID < CURSOR_REQUIRESEEK ); + assert( CURSOR_FAULT > CURSOR_REQUIRESEEK ); + if( pCur->eState>=CURSOR_REQUIRESEEK ){ + if( pCur->eState==CURSOR_FAULT ){ + return pCur->skip; + } + clearCursorPosition(pCur); + } + pRoot = pCur->pPage; + if( pRoot && pRoot->pgno==pCur->pgnoRoot ){ + assert( pRoot->isInit ); + }else{ + if( + SQLITE_OK!=(rc = getAndInitPage(pBt, pCur->pgnoRoot, &pRoot, 0)) + ){ + pCur->eState = CURSOR_INVALID; + return rc; + } + releasePage(pCur->pPage); + pCur->pPage = pRoot; + } + pCur->idx = 0; + pCur->info.nSize = 0; + if( pRoot->nCell==0 && !pRoot->leaf ){ + Pgno subpage; + assert( pRoot->pgno==1 ); + subpage = get4byte(&pRoot->aData[pRoot->hdrOffset+8]); + assert( subpage>0 ); + pCur->eState = CURSOR_VALID; + rc = moveToChild(pCur, subpage); + } + pCur->eState = ((pCur->pPage->nCell>0)?CURSOR_VALID:CURSOR_INVALID); + return rc; +} + +/* +** Move the cursor down to the left-most leaf entry beneath the +** entry to which it is currently pointing. +** +** The left-most leaf is the one with the smallest key - the first +** in ascending order. +*/ +static int moveToLeftmost(BtCursor *pCur){ + Pgno pgno; + int rc = SQLITE_OK; + MemPage *pPage; + + assert( cursorHoldsMutex(pCur) ); + assert( pCur->eState==CURSOR_VALID ); + while( rc==SQLITE_OK && !(pPage = pCur->pPage)->leaf ){ + assert( pCur->idx>=0 && pCur->idxnCell ); + pgno = get4byte(findCell(pPage, pCur->idx)); + rc = moveToChild(pCur, pgno); + } + return rc; +} + +/* +** Move the cursor down to the right-most leaf entry beneath the +** page to which it is currently pointing. Notice the difference +** between moveToLeftmost() and moveToRightmost(). moveToLeftmost() +** finds the left-most entry beneath the *entry* whereas moveToRightmost() +** finds the right-most entry beneath the *page*. +** +** The right-most entry is the one with the largest key - the last +** key in ascending order. +*/ +static int moveToRightmost(BtCursor *pCur){ + Pgno pgno; + int rc = SQLITE_OK; + MemPage *pPage; + + assert( cursorHoldsMutex(pCur) ); + assert( pCur->eState==CURSOR_VALID ); + while( rc==SQLITE_OK && !(pPage = pCur->pPage)->leaf ){ + pgno = get4byte(&pPage->aData[pPage->hdrOffset+8]); + pCur->idx = pPage->nCell; + rc = moveToChild(pCur, pgno); + } + if( rc==SQLITE_OK ){ + pCur->idx = pPage->nCell - 1; + pCur->info.nSize = 0; + } + return SQLITE_OK; +} + +/* Move the cursor to the first entry in the table. Return SQLITE_OK +** on success. Set *pRes to 0 if the cursor actually points to something +** or set *pRes to 1 if the table is empty. 
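+**
+** Together with sqlite3BtreeNext(), this supports a simple full-table
+** scan, sketched here for illustration:
+**
+**     int done;
+**     for(rc=sqlite3BtreeFirst(pCur, &done); rc==SQLITE_OK && !done;
+**         rc=sqlite3BtreeNext(pCur, &done)){
+**       ... examine the entry the cursor points to ...
+**     }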
+*/ +int sqlite3BtreeFirst(BtCursor *pCur, int *pRes){ + int rc; + + assert( cursorHoldsMutex(pCur) ); + assert( sqlite3_mutex_held(pCur->pBtree->pSqlite->mutex) ); + rc = moveToRoot(pCur); + if( rc==SQLITE_OK ){ + if( pCur->eState==CURSOR_INVALID ){ + assert( pCur->pPage->nCell==0 ); + *pRes = 1; + rc = SQLITE_OK; + }else{ + assert( pCur->pPage->nCell>0 ); + *pRes = 0; + rc = moveToLeftmost(pCur); + } + } + return rc; +} + +/* Move the cursor to the last entry in the table. Return SQLITE_OK +** on success. Set *pRes to 0 if the cursor actually points to something +** or set *pRes to 1 if the table is empty. +*/ +int sqlite3BtreeLast(BtCursor *pCur, int *pRes){ + int rc; + + assert( cursorHoldsMutex(pCur) ); + assert( sqlite3_mutex_held(pCur->pBtree->pSqlite->mutex) ); + rc = moveToRoot(pCur); + if( rc==SQLITE_OK ){ + if( CURSOR_INVALID==pCur->eState ){ + assert( pCur->pPage->nCell==0 ); + *pRes = 1; + }else{ + assert( pCur->eState==CURSOR_VALID ); + *pRes = 0; + rc = moveToRightmost(pCur); + } + } + return rc; +} + +/* Move the cursor so that it points to an entry near pKey/nKey. +** Return a success code. +** +** For INTKEY tables, only the nKey parameter is used. pKey is +** ignored. For other tables, nKey is the number of bytes of data +** in pKey. The comparison function specified when the cursor was +** created is used to compare keys. +** +** If an exact match is not found, then the cursor is always +** left pointing at a leaf page which would hold the entry if it +** were present. The cursor might point to an entry that comes +** before or after the key. +** +** The result of comparing the key with the entry to which the +** cursor is written to *pRes if pRes!=NULL. The meaning of +** this value is as follows: +** +** *pRes<0 The cursor is left pointing at an entry that +** is smaller than pKey or if the table is empty +** and the cursor is therefore left point to nothing. +** +** *pRes==0 The cursor is left pointing at an entry that +** exactly matches pKey. +** +** *pRes>0 The cursor is left pointing at an entry that +** is larger than pKey. +** +*/ +int sqlite3BtreeMoveto( + BtCursor *pCur, /* The cursor to be moved */ + const void *pKey, /* The key content for indices. Not used by tables */ + i64 nKey, /* Size of pKey. 
Or the key for tables */ + int biasRight, /* If true, bias the search to the high end */ + int *pRes /* Search result flag */ +){ + int rc; + + assert( cursorHoldsMutex(pCur) ); + assert( sqlite3_mutex_held(pCur->pBtree->pSqlite->mutex) ); + rc = moveToRoot(pCur); + if( rc ){ + return rc; + } + assert( pCur->pPage ); + assert( pCur->pPage->isInit ); + if( pCur->eState==CURSOR_INVALID ){ + *pRes = -1; + assert( pCur->pPage->nCell==0 ); + return SQLITE_OK; + } + for(;;){ + int lwr, upr; + Pgno chldPg; + MemPage *pPage = pCur->pPage; + int c = -1; /* pRes return if table is empty must be -1 */ + lwr = 0; + upr = pPage->nCell-1; + if( !pPage->intKey && pKey==0 ){ + return SQLITE_CORRUPT_BKPT; + } + if( biasRight ){ + pCur->idx = upr; + }else{ + pCur->idx = (upr+lwr)/2; + } + if( lwr<=upr ) for(;;){ + void *pCellKey; + i64 nCellKey; + pCur->info.nSize = 0; + if( pPage->intKey ){ + u8 *pCell; + pCell = findCell(pPage, pCur->idx) + pPage->childPtrSize; + if( pPage->hasData ){ + u32 dummy; + pCell += getVarint32(pCell, &dummy); + } + getVarint(pCell, (u64 *)&nCellKey); + if( nCellKeynKey ){ + c = +1; + }else{ + c = 0; + } + }else{ + int available; + pCellKey = (void *)fetchPayload(pCur, &available, 0); + nCellKey = pCur->info.nKey; + if( available>=nCellKey ){ + c = pCur->xCompare(pCur->pArg, nCellKey, pCellKey, nKey, pKey); + }else{ + pCellKey = sqlite3_malloc( nCellKey ); + if( pCellKey==0 ) return SQLITE_NOMEM; + rc = sqlite3BtreeKey(pCur, 0, nCellKey, (void *)pCellKey); + c = pCur->xCompare(pCur->pArg, nCellKey, pCellKey, nKey, pKey); + sqlite3_free(pCellKey); + if( rc ){ + return rc; + } + } + } + if( c==0 ){ + if( pPage->leafData && !pPage->leaf ){ + lwr = pCur->idx; + upr = lwr - 1; + break; + }else{ + if( pRes ) *pRes = 0; + return SQLITE_OK; + } + } + if( c<0 ){ + lwr = pCur->idx+1; + }else{ + upr = pCur->idx-1; + } + if( lwr>upr ){ + break; + } + pCur->idx = (lwr+upr)/2; + } + assert( lwr==upr+1 ); + assert( pPage->isInit ); + if( pPage->leaf ){ + chldPg = 0; + }else if( lwr>=pPage->nCell ){ + chldPg = get4byte(&pPage->aData[pPage->hdrOffset+8]); + }else{ + chldPg = get4byte(findCell(pPage, lwr)); + } + if( chldPg==0 ){ + assert( pCur->idx>=0 && pCur->idxpPage->nCell ); + if( pRes ) *pRes = c; + return SQLITE_OK; + } + pCur->idx = lwr; + pCur->info.nSize = 0; + rc = moveToChild(pCur, chldPg); + if( rc ){ + return rc; + } + } + /* NOT REACHED */ +} + + +/* +** Return TRUE if the cursor is not pointing at an entry of the table. +** +** TRUE will be returned after a call to sqlite3BtreeNext() moves +** past the last entry in the table or sqlite3BtreePrev() moves past +** the first entry. TRUE is also returned if the table is empty. +*/ +int sqlite3BtreeEof(BtCursor *pCur){ + /* TODO: What if the cursor is in CURSOR_REQUIRESEEK but all table entries + ** have been deleted? This API will need to change to return an error code + ** as well as the boolean result value. + */ + return (CURSOR_VALID!=pCur->eState); +} + +/* +** Return the database connection handle for a cursor. +*/ +sqlite3 *sqlite3BtreeCursorDb(const BtCursor *pCur){ + assert( sqlite3_mutex_held(pCur->pBtree->pSqlite->mutex) ); + return pCur->pBtree->pSqlite; +} + +/* +** Advance the cursor to the next entry in the database. If +** successful then set *pRes=0. If the cursor +** was already pointing to the last entry in the database before +** this routine was called, then set *pRes=1. 
+*/ +static int btreeNext(BtCursor *pCur, int *pRes){ + int rc; + MemPage *pPage; + + assert( cursorHoldsMutex(pCur) ); + rc = restoreOrClearCursorPosition(pCur); + if( rc!=SQLITE_OK ){ + return rc; + } + assert( pRes!=0 ); + pPage = pCur->pPage; + if( CURSOR_INVALID==pCur->eState ){ + *pRes = 1; + return SQLITE_OK; + } + if( pCur->skip>0 ){ + pCur->skip = 0; + *pRes = 0; + return SQLITE_OK; + } + pCur->skip = 0; + + assert( pPage->isInit ); + assert( pCur->idxnCell ); + + pCur->idx++; + pCur->info.nSize = 0; + if( pCur->idx>=pPage->nCell ){ + if( !pPage->leaf ){ + rc = moveToChild(pCur, get4byte(&pPage->aData[pPage->hdrOffset+8])); + if( rc ) return rc; + rc = moveToLeftmost(pCur); + *pRes = 0; + return rc; + } + do{ + if( sqlite3BtreeIsRootPage(pPage) ){ + *pRes = 1; + pCur->eState = CURSOR_INVALID; + return SQLITE_OK; + } + sqlite3BtreeMoveToParent(pCur); + pPage = pCur->pPage; + }while( pCur->idx>=pPage->nCell ); + *pRes = 0; + if( pPage->leafData ){ + rc = sqlite3BtreeNext(pCur, pRes); + }else{ + rc = SQLITE_OK; + } + return rc; + } + *pRes = 0; + if( pPage->leaf ){ + return SQLITE_OK; + } + rc = moveToLeftmost(pCur); + return rc; +} +int sqlite3BtreeNext(BtCursor *pCur, int *pRes){ + int rc; + assert( cursorHoldsMutex(pCur) ); + rc = btreeNext(pCur, pRes); + return rc; +} + + +/* +** Step the cursor to the back to the previous entry in the database. If +** successful then set *pRes=0. If the cursor +** was already pointing to the first entry in the database before +** this routine was called, then set *pRes=1. +*/ +static int btreePrevious(BtCursor *pCur, int *pRes){ + int rc; + Pgno pgno; + MemPage *pPage; + + assert( cursorHoldsMutex(pCur) ); + rc = restoreOrClearCursorPosition(pCur); + if( rc!=SQLITE_OK ){ + return rc; + } + if( CURSOR_INVALID==pCur->eState ){ + *pRes = 1; + return SQLITE_OK; + } + if( pCur->skip<0 ){ + pCur->skip = 0; + *pRes = 0; + return SQLITE_OK; + } + pCur->skip = 0; + + pPage = pCur->pPage; + assert( pPage->isInit ); + assert( pCur->idx>=0 ); + if( !pPage->leaf ){ + pgno = get4byte( findCell(pPage, pCur->idx) ); + rc = moveToChild(pCur, pgno); + if( rc ){ + return rc; + } + rc = moveToRightmost(pCur); + }else{ + while( pCur->idx==0 ){ + if( sqlite3BtreeIsRootPage(pPage) ){ + pCur->eState = CURSOR_INVALID; + *pRes = 1; + return SQLITE_OK; + } + sqlite3BtreeMoveToParent(pCur); + pPage = pCur->pPage; + } + pCur->idx--; + pCur->info.nSize = 0; + if( pPage->leafData && !pPage->leaf ){ + rc = sqlite3BtreePrevious(pCur, pRes); + }else{ + rc = SQLITE_OK; + } + } + *pRes = 0; + return rc; +} +int sqlite3BtreePrevious(BtCursor *pCur, int *pRes){ + int rc; + assert( cursorHoldsMutex(pCur) ); + rc = btreePrevious(pCur, pRes); + return rc; +} + +/* +** Allocate a new page from the database file. +** +** The new page is marked as dirty. (In other words, sqlite3PagerWrite() +** has already been called on the new page.) The new page has also +** been referenced and the calling routine is responsible for calling +** sqlite3PagerUnref() on the new page when it is done. +** +** SQLITE_OK is returned on success. Any other return value indicates +** an error. *ppPage and *pPgno are undefined in the event of an error. +** Do not invoke sqlite3PagerUnref() on *ppPage if an error is returned. +** +** If the "nearby" parameter is not 0, then a (feeble) effort is made to +** locate a page close to the page number "nearby". This can be used in an +** attempt to keep related pages close to each other in the database file, +** which in turn can make database access faster. 
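+** Within this file, for example, incrVacuumStep() passes the page it wants
+** to pull off the free-list as "nearby" (with "exact" set), and fillInCell()
+** passes the previous overflow page number so that overflow chains tend to
+** be allocated contiguously.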
+** +** If the "exact" parameter is not 0, and the page-number nearby exists +** anywhere on the free-list, then it is guarenteed to be returned. This +** is only used by auto-vacuum databases when allocating a new table. +*/ +static int allocateBtreePage( + BtShared *pBt, + MemPage **ppPage, + Pgno *pPgno, + Pgno nearby, + u8 exact +){ + MemPage *pPage1; + int rc; + int n; /* Number of pages on the freelist */ + int k; /* Number of leaves on the trunk of the freelist */ + MemPage *pTrunk = 0; + MemPage *pPrevTrunk = 0; + + assert( sqlite3_mutex_held(pBt->mutex) ); + pPage1 = pBt->pPage1; + n = get4byte(&pPage1->aData[36]); + if( n>0 ){ + /* There are pages on the freelist. Reuse one of those pages. */ + Pgno iTrunk; + u8 searchList = 0; /* If the free-list must be searched for 'nearby' */ + + /* If the 'exact' parameter was true and a query of the pointer-map + ** shows that the page 'nearby' is somewhere on the free-list, then + ** the entire-list will be searched for that page. + */ +#ifndef SQLITE_OMIT_AUTOVACUUM + if( exact && nearby<=sqlite3PagerPagecount(pBt->pPager) ){ + u8 eType; + assert( nearby>0 ); + assert( pBt->autoVacuum ); + rc = ptrmapGet(pBt, nearby, &eType, 0); + if( rc ) return rc; + if( eType==PTRMAP_FREEPAGE ){ + searchList = 1; + } + *pPgno = nearby; + } +#endif + + /* Decrement the free-list count by 1. Set iTrunk to the index of the + ** first free-list trunk page. iPrevTrunk is initially 1. + */ + rc = sqlite3PagerWrite(pPage1->pDbPage); + if( rc ) return rc; + put4byte(&pPage1->aData[36], n-1); + + /* The code within this loop is run only once if the 'searchList' variable + ** is not true. Otherwise, it runs once for each trunk-page on the + ** free-list until the page 'nearby' is located. + */ + do { + pPrevTrunk = pTrunk; + if( pPrevTrunk ){ + iTrunk = get4byte(&pPrevTrunk->aData[0]); + }else{ + iTrunk = get4byte(&pPage1->aData[32]); + } + rc = sqlite3BtreeGetPage(pBt, iTrunk, &pTrunk, 0); + if( rc ){ + pTrunk = 0; + goto end_allocate_page; + } + + k = get4byte(&pTrunk->aData[4]); + if( k==0 && !searchList ){ + /* The trunk has no leaves and the list is not being searched. + ** So extract the trunk page itself and use it as the newly + ** allocated page */ + assert( pPrevTrunk==0 ); + rc = sqlite3PagerWrite(pTrunk->pDbPage); + if( rc ){ + goto end_allocate_page; + } + *pPgno = iTrunk; + memcpy(&pPage1->aData[32], &pTrunk->aData[0], 4); + *ppPage = pTrunk; + pTrunk = 0; + TRACE(("ALLOCATE: %d trunk - %d free pages left\n", *pPgno, n-1)); + }else if( k>pBt->usableSize/4 - 8 ){ + /* Value of k is out of range. Database corruption */ + rc = SQLITE_CORRUPT_BKPT; + goto end_allocate_page; +#ifndef SQLITE_OMIT_AUTOVACUUM + }else if( searchList && nearby==iTrunk ){ + /* The list is being searched and this trunk page is the page + ** to allocate, regardless of whether it has leaves. + */ + assert( *pPgno==iTrunk ); + *ppPage = pTrunk; + searchList = 0; + rc = sqlite3PagerWrite(pTrunk->pDbPage); + if( rc ){ + goto end_allocate_page; + } + if( k==0 ){ + if( !pPrevTrunk ){ + memcpy(&pPage1->aData[32], &pTrunk->aData[0], 4); + }else{ + memcpy(&pPrevTrunk->aData[0], &pTrunk->aData[0], 4); + } + }else{ + /* The trunk page is required by the caller but it contains + ** pointers to free-list leaves. The first leaf becomes a trunk + ** page in this case. 
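+          ** (That is, the first leaf page number stored at aData[8] is
+          ** promoted to a trunk page that inherits the remaining k-1
+          ** leaf entries.)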
+          */
+          MemPage *pNewTrunk;
+          Pgno iNewTrunk = get4byte(&pTrunk->aData[8]);
+          rc = sqlite3BtreeGetPage(pBt, iNewTrunk, &pNewTrunk, 0);
+          if( rc!=SQLITE_OK ){
+            goto end_allocate_page;
+          }
+          rc = sqlite3PagerWrite(pNewTrunk->pDbPage);
+          if( rc!=SQLITE_OK ){
+            releasePage(pNewTrunk);
+            goto end_allocate_page;
+          }
+          memcpy(&pNewTrunk->aData[0], &pTrunk->aData[0], 4);
+          put4byte(&pNewTrunk->aData[4], k-1);
+          memcpy(&pNewTrunk->aData[8], &pTrunk->aData[12], (k-1)*4);
+          releasePage(pNewTrunk);
+          if( !pPrevTrunk ){
+            put4byte(&pPage1->aData[32], iNewTrunk);
+          }else{
+            rc = sqlite3PagerWrite(pPrevTrunk->pDbPage);
+            if( rc ){
+              goto end_allocate_page;
+            }
+            put4byte(&pPrevTrunk->aData[0], iNewTrunk);
+          }
+        }
+        pTrunk = 0;
+        TRACE(("ALLOCATE: %d trunk - %d free pages left\n", *pPgno, n-1));
+#endif
+      }else{
+        /* Extract a leaf from the trunk */
+        int closest;
+        Pgno iPage;
+        unsigned char *aData = pTrunk->aData;
+        rc = sqlite3PagerWrite(pTrunk->pDbPage);
+        if( rc ){
+          goto end_allocate_page;
+        }
+        if( nearby>0 ){
+          int i, dist;
+          closest = 0;
+          dist = get4byte(&aData[8]) - nearby;
+          if( dist<0 ) dist = -dist;
+          for(i=1; i<k; i++){
+            int d2 = get4byte(&aData[8+i*4]) - nearby;
+            if( d2<0 ) d2 = -d2;
+            if( d2<dist ){
+              closest = i;
+              dist = d2;
+            }
+          }
+        }else{
+          closest = 0;
+        }
+
+        iPage = get4byte(&aData[8+closest*4]);
+        if( !searchList || iPage==nearby ){
+          *pPgno = iPage;
+          if( *pPgno>sqlite3PagerPagecount(pBt->pPager) ){
+            /* Free page off the end of the file */
+            return SQLITE_CORRUPT_BKPT;
+          }
+          TRACE(("ALLOCATE: %d was leaf %d of %d on trunk %d"
+                 ": %d more free pages\n",
+                 *pPgno, closest+1, k, pTrunk->pgno, n-1));
+          if( closest<k-1 ){
+            memcpy(&aData[8+closest*4], &aData[4+k*4], 4);
+          }
+          put4byte(&aData[4], k-1);
+          rc = sqlite3BtreeGetPage(pBt, *pPgno, ppPage, 1);
+          if( rc==SQLITE_OK ){
+            sqlite3PagerDontRollback((*ppPage)->pDbPage);
+            rc = sqlite3PagerWrite((*ppPage)->pDbPage);
+            if( rc!=SQLITE_OK ){
+              releasePage(*ppPage);
+            }
+          }
+          searchList = 0;
+        }
+      }
+      releasePage(pPrevTrunk);
+      pPrevTrunk = 0;
+    }while( searchList );
+  }else{
+    /* There are no pages on the freelist, so create a new page at the
+    ** end of the file */
+    *pPgno = sqlite3PagerPagecount(pBt->pPager) + 1;
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+    if( pBt->nTrunc ){
+      /* An incr-vacuum has already run within this transaction. So the
+      ** page to allocate is not from the physical end of the file, but
+      ** at pBt->nTrunc.
+      */
+      *pPgno = pBt->nTrunc+1;
+      if( *pPgno==PENDING_BYTE_PAGE(pBt) ){
+        (*pPgno)++;
+      }
+    }
+    if( pBt->autoVacuum && PTRMAP_ISPAGE(pBt, *pPgno) ){
+      /* If *pPgno refers to a pointer-map page, allocate two new pages
+      ** at the end of the file instead of one. The first allocated page
+      ** becomes a new pointer-map page, the second is used by the caller.
+      */
+      TRACE(("ALLOCATE: %d from end of file (pointer-map page)\n", *pPgno));
+      assert( *pPgno!=PENDING_BYTE_PAGE(pBt) );
+      (*pPgno)++;
+    }
+    if( pBt->nTrunc ){
+      pBt->nTrunc = *pPgno;
+    }
+#endif
+
+    assert( *pPgno!=PENDING_BYTE_PAGE(pBt) );
+    rc = sqlite3BtreeGetPage(pBt, *pPgno, ppPage, 0);
+    if( rc ) return rc;
+    rc = sqlite3PagerWrite((*ppPage)->pDbPage);
+    if( rc!=SQLITE_OK ){
+      releasePage(*ppPage);
+    }
+    TRACE(("ALLOCATE: %d from end of file\n", *pPgno));
+  }
+
+  assert( *pPgno!=PENDING_BYTE_PAGE(pBt) );
+
+end_allocate_page:
+  releasePage(pTrunk);
+  releasePage(pPrevTrunk);
+  return rc;
+}
+
+/*
+** Add a page of the database file to the freelist.
+**
+** sqlite3PagerUnref() is NOT called for pPage.
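+**
+** For reference, a free-list trunk page read and written by this routine
+** and by allocateBtreePage() above is laid out as:
+**
+**     bytes 0..3    page number of the next trunk page (0 if none)
+**     bytes 4..7    number k of leaf page numbers held on this trunk page
+**     bytes 8..     k leaf page numbers, 4 bytes each
+**
+** The page number of the first trunk page is stored at byte offset 32 of
+** page 1, and the total number of free pages at byte offset 36.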
+*/ +static int freePage(MemPage *pPage){ + BtShared *pBt = pPage->pBt; + MemPage *pPage1 = pBt->pPage1; + int rc, n, k; + + /* Prepare the page for freeing */ + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + assert( pPage->pgno>1 ); + pPage->isInit = 0; + releasePage(pPage->pParent); + pPage->pParent = 0; + + /* Increment the free page count on pPage1 */ + rc = sqlite3PagerWrite(pPage1->pDbPage); + if( rc ) return rc; + n = get4byte(&pPage1->aData[36]); + put4byte(&pPage1->aData[36], n+1); + +#ifdef SQLITE_SECURE_DELETE + /* If the SQLITE_SECURE_DELETE compile-time option is enabled, then + ** always fully overwrite deleted information with zeros. + */ + rc = sqlite3PagerWrite(pPage->pDbPage); + if( rc ) return rc; + memset(pPage->aData, 0, pPage->pBt->pageSize); +#endif + +#ifndef SQLITE_OMIT_AUTOVACUUM + /* If the database supports auto-vacuum, write an entry in the pointer-map + ** to indicate that the page is free. + */ + if( pBt->autoVacuum ){ + rc = ptrmapPut(pBt, pPage->pgno, PTRMAP_FREEPAGE, 0); + if( rc ) return rc; + } +#endif + + if( n==0 ){ + /* This is the first free page */ + rc = sqlite3PagerWrite(pPage->pDbPage); + if( rc ) return rc; + memset(pPage->aData, 0, 8); + put4byte(&pPage1->aData[32], pPage->pgno); + TRACE(("FREE-PAGE: %d first\n", pPage->pgno)); + }else{ + /* Other free pages already exist. Retrive the first trunk page + ** of the freelist and find out how many leaves it has. */ + MemPage *pTrunk; + rc = sqlite3BtreeGetPage(pBt, get4byte(&pPage1->aData[32]), &pTrunk, 0); + if( rc ) return rc; + k = get4byte(&pTrunk->aData[4]); + if( k>=pBt->usableSize/4 - 8 ){ + /* The trunk is full. Turn the page being freed into a new + ** trunk page with no leaves. */ + rc = sqlite3PagerWrite(pPage->pDbPage); + if( rc==SQLITE_OK ){ + put4byte(pPage->aData, pTrunk->pgno); + put4byte(&pPage->aData[4], 0); + put4byte(&pPage1->aData[32], pPage->pgno); + TRACE(("FREE-PAGE: %d new trunk page replacing %d\n", + pPage->pgno, pTrunk->pgno)); + } + }else if( k<0 ){ + rc = SQLITE_CORRUPT; + }else{ + /* Add the newly freed page as a leaf on the current trunk */ + rc = sqlite3PagerWrite(pTrunk->pDbPage); + if( rc==SQLITE_OK ){ + put4byte(&pTrunk->aData[4], k+1); + put4byte(&pTrunk->aData[8+k*4], pPage->pgno); +#ifndef SQLITE_SECURE_DELETE + sqlite3PagerDontWrite(pPage->pDbPage); +#endif + } + TRACE(("FREE-PAGE: %d leaf on trunk page %d\n",pPage->pgno,pTrunk->pgno)); + } + releasePage(pTrunk); + } + return rc; +} + +/* +** Free any overflow pages associated with the given Cell. +*/ +static int clearCell(MemPage *pPage, unsigned char *pCell){ + BtShared *pBt = pPage->pBt; + CellInfo info; + Pgno ovflPgno; + int rc; + int nOvfl; + int ovflPageSize; + + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + sqlite3BtreeParseCellPtr(pPage, pCell, &info); + if( info.iOverflow==0 ){ + return SQLITE_OK; /* No overflow pages. 
Return without doing anything */ + } + ovflPgno = get4byte(&pCell[info.iOverflow]); + ovflPageSize = pBt->usableSize - 4; + nOvfl = (info.nPayload - info.nLocal + ovflPageSize - 1)/ovflPageSize; + assert( ovflPgno==0 || nOvfl>0 ); + while( nOvfl-- ){ + MemPage *pOvfl; + if( ovflPgno==0 || ovflPgno>sqlite3PagerPagecount(pBt->pPager) ){ + return SQLITE_CORRUPT_BKPT; + } + + rc = getOverflowPage(pBt, ovflPgno, &pOvfl, (nOvfl==0)?0:&ovflPgno); + if( rc ) return rc; + rc = freePage(pOvfl); + sqlite3PagerUnref(pOvfl->pDbPage); + if( rc ) return rc; + } + return SQLITE_OK; +} + +/* +** Create the byte sequence used to represent a cell on page pPage +** and write that byte sequence into pCell[]. Overflow pages are +** allocated and filled in as necessary. The calling procedure +** is responsible for making sure sufficient space has been allocated +** for pCell[]. +** +** Note that pCell does not necessary need to point to the pPage->aData +** area. pCell might point to some temporary storage. The cell will +** be constructed in this temporary area then copied into pPage->aData +** later. +*/ +static int fillInCell( + MemPage *pPage, /* The page that contains the cell */ + unsigned char *pCell, /* Complete text of the cell */ + const void *pKey, i64 nKey, /* The key */ + const void *pData,int nData, /* The data */ + int nZero, /* Extra zero bytes to append to pData */ + int *pnSize /* Write cell size here */ +){ + int nPayload; + const u8 *pSrc; + int nSrc, n, rc; + int spaceLeft; + MemPage *pOvfl = 0; + MemPage *pToRelease = 0; + unsigned char *pPrior; + unsigned char *pPayload; + BtShared *pBt = pPage->pBt; + Pgno pgnoOvfl = 0; + int nHeader; + CellInfo info; + + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + + /* Fill in the header. */ + nHeader = 0; + if( !pPage->leaf ){ + nHeader += 4; + } + if( pPage->hasData ){ + nHeader += putVarint(&pCell[nHeader], nData+nZero); + }else{ + nData = nZero = 0; + } + nHeader += putVarint(&pCell[nHeader], *(u64*)&nKey); + sqlite3BtreeParseCellPtr(pPage, pCell, &info); + assert( info.nHeader==nHeader ); + assert( info.nKey==nKey ); + assert( info.nData==nData+nZero ); + + /* Fill in the payload */ + nPayload = nData + nZero; + if( pPage->intKey ){ + pSrc = pData; + nSrc = nData; + nData = 0; + }else{ + nPayload += nKey; + pSrc = pKey; + nSrc = nKey; + } + *pnSize = info.nSize; + spaceLeft = info.nLocal; + pPayload = &pCell[nHeader]; + pPrior = &pCell[info.iOverflow]; + + while( nPayload>0 ){ + if( spaceLeft==0 ){ + int isExact = 0; +#ifndef SQLITE_OMIT_AUTOVACUUM + Pgno pgnoPtrmap = pgnoOvfl; /* Overflow page pointer-map entry page */ + if( pBt->autoVacuum ){ + do{ + pgnoOvfl++; + } while( + PTRMAP_ISPAGE(pBt, pgnoOvfl) || pgnoOvfl==PENDING_BYTE_PAGE(pBt) + ); + if( pgnoOvfl>1 ){ + /* isExact = 1; */ + } + } +#endif + rc = allocateBtreePage(pBt, &pOvfl, &pgnoOvfl, pgnoOvfl, isExact); +#ifndef SQLITE_OMIT_AUTOVACUUM + /* If the database supports auto-vacuum, and the second or subsequent + ** overflow page is being allocated, add an entry to the pointer-map + ** for that page now. + ** + ** If this is the first overflow page, then write a partial entry + ** to the pointer-map. If we write nothing to this pointer-map slot, + ** then the optimistic overflow chain processing in clearCell() + ** may misinterpret the uninitialised values and delete the + ** wrong pages from the database. 
+ */ + if( pBt->autoVacuum && rc==SQLITE_OK ){ + u8 eType = (pgnoPtrmap?PTRMAP_OVERFLOW2:PTRMAP_OVERFLOW1); + rc = ptrmapPut(pBt, pgnoOvfl, eType, pgnoPtrmap); + if( rc ){ + releasePage(pOvfl); + } + } +#endif + if( rc ){ + releasePage(pToRelease); + return rc; + } + put4byte(pPrior, pgnoOvfl); + releasePage(pToRelease); + pToRelease = pOvfl; + pPrior = pOvfl->aData; + put4byte(pPrior, 0); + pPayload = &pOvfl->aData[4]; + spaceLeft = pBt->usableSize - 4; + } + n = nPayload; + if( n>spaceLeft ) n = spaceLeft; + if( nSrc>0 ){ + if( n>nSrc ) n = nSrc; + assert( pSrc ); + memcpy(pPayload, pSrc, n); + }else{ + memset(pPayload, 0, n); + } + nPayload -= n; + pPayload += n; + pSrc += n; + nSrc -= n; + spaceLeft -= n; + if( nSrc==0 ){ + nSrc = nData; + pSrc = pData; + } + } + releasePage(pToRelease); + return SQLITE_OK; +} + +/* +** Change the MemPage.pParent pointer on the page whose number is +** given in the second argument so that MemPage.pParent holds the +** pointer in the third argument. +*/ +static int reparentPage(BtShared *pBt, Pgno pgno, MemPage *pNewParent, int idx){ + MemPage *pThis; + DbPage *pDbPage; + + assert( sqlite3_mutex_held(pBt->mutex) ); + assert( pNewParent!=0 ); + if( pgno==0 ) return SQLITE_OK; + assert( pBt->pPager!=0 ); + pDbPage = sqlite3PagerLookup(pBt->pPager, pgno); + if( pDbPage ){ + pThis = (MemPage *)sqlite3PagerGetExtra(pDbPage); + if( pThis->isInit ){ + assert( pThis->aData==sqlite3PagerGetData(pDbPage) ); + if( pThis->pParent!=pNewParent ){ + if( pThis->pParent ) sqlite3PagerUnref(pThis->pParent->pDbPage); + pThis->pParent = pNewParent; + sqlite3PagerRef(pNewParent->pDbPage); + } + pThis->idxParent = idx; + } + sqlite3PagerUnref(pDbPage); + } + +#ifndef SQLITE_OMIT_AUTOVACUUM + if( pBt->autoVacuum ){ + return ptrmapPut(pBt, pgno, PTRMAP_BTREE, pNewParent->pgno); + } +#endif + return SQLITE_OK; +} + + + +/* +** Change the pParent pointer of all children of pPage to point back +** to pPage. +** +** In other words, for every child of pPage, invoke reparentPage() +** to make sure that each child knows that pPage is its parent. +** +** This routine gets called after you memcpy() one page into +** another. +*/ +static int reparentChildPages(MemPage *pPage){ + int i; + BtShared *pBt = pPage->pBt; + int rc = SQLITE_OK; + + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + if( pPage->leaf ) return SQLITE_OK; + + for(i=0; inCell; i++){ + u8 *pCell = findCell(pPage, i); + if( !pPage->leaf ){ + rc = reparentPage(pBt, get4byte(pCell), pPage, i); + if( rc!=SQLITE_OK ) return rc; + } + } + if( !pPage->leaf ){ + rc = reparentPage(pBt, get4byte(&pPage->aData[pPage->hdrOffset+8]), + pPage, i); + pPage->idxShift = 0; + } + return rc; +} + +/* +** Remove the i-th cell from pPage. This routine effects pPage only. +** The cell content is not freed or deallocated. It is assumed that +** the cell content has been copied someplace else. This routine just +** removes the reference to the cell from pPage. +** +** "sz" must be the number of bytes in the cell. 
+*/ +static void dropCell(MemPage *pPage, int idx, int sz){ + int i; /* Loop counter */ + int pc; /* Offset to cell content of cell being deleted */ + u8 *data; /* pPage->aData */ + u8 *ptr; /* Used to move bytes around within data[] */ + + assert( idx>=0 && idxnCell ); + assert( sz==cellSize(pPage, idx) ); + assert( sqlite3PagerIswriteable(pPage->pDbPage) ); + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + data = pPage->aData; + ptr = &data[pPage->cellOffset + 2*idx]; + pc = get2byte(ptr); + assert( pc>10 && pc+sz<=pPage->pBt->usableSize ); + freeSpace(pPage, pc, sz); + for(i=idx+1; inCell; i++, ptr+=2){ + ptr[0] = ptr[2]; + ptr[1] = ptr[3]; + } + pPage->nCell--; + put2byte(&data[pPage->hdrOffset+3], pPage->nCell); + pPage->nFree += 2; + pPage->idxShift = 1; +} + +/* +** Insert a new cell on pPage at cell index "i". pCell points to the +** content of the cell. +** +** If the cell content will fit on the page, then put it there. If it +** will not fit, then make a copy of the cell content into pTemp if +** pTemp is not null. Regardless of pTemp, allocate a new entry +** in pPage->aOvfl[] and make it point to the cell content (either +** in pTemp or the original pCell) and also record its index. +** Allocating a new entry in pPage->aCell[] implies that +** pPage->nOverflow is incremented. +** +** If nSkip is non-zero, then do not copy the first nSkip bytes of the +** cell. The caller will overwrite them after this function returns. If +** nSkip is non-zero, then pCell may not point to an invalid memory location +** (but pCell+nSkip is always valid). +*/ +static int insertCell( + MemPage *pPage, /* Page into which we are copying */ + int i, /* New cell becomes the i-th cell of the page */ + u8 *pCell, /* Content of the new cell */ + int sz, /* Bytes of content in pCell */ + u8 *pTemp, /* Temp storage space for pCell, if needed */ + u8 nSkip /* Do not write the first nSkip bytes of the cell */ +){ + int idx; /* Where to write new cell content in data[] */ + int j; /* Loop counter */ + int top; /* First byte of content for any cell in data[] */ + int end; /* First byte past the last cell pointer in data[] */ + int ins; /* Index in data[] where new cell pointer is inserted */ + int hdr; /* Offset into data[] of the page header */ + int cellOffset; /* Address of first cell pointer in data[] */ + u8 *data; /* The content of the whole page */ + u8 *ptr; /* Used for moving information around in data[] */ + + assert( i>=0 && i<=pPage->nCell+pPage->nOverflow ); + assert( sz==cellSizePtr(pPage, pCell) ); + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + if( pPage->nOverflow || sz+2>pPage->nFree ){ + if( pTemp ){ + memcpy(pTemp+nSkip, pCell+nSkip, sz-nSkip); + pCell = pTemp; + } + j = pPage->nOverflow++; + assert( jaOvfl)/sizeof(pPage->aOvfl[0]) ); + pPage->aOvfl[j].pCell = pCell; + pPage->aOvfl[j].idx = i; + pPage->nFree = 0; + }else{ + int rc = sqlite3PagerWrite(pPage->pDbPage); + if( rc!=SQLITE_OK ){ + return rc; + } + assert( sqlite3PagerIswriteable(pPage->pDbPage) ); + data = pPage->aData; + hdr = pPage->hdrOffset; + top = get2byte(&data[hdr+5]); + cellOffset = pPage->cellOffset; + end = cellOffset + 2*pPage->nCell + 2; + ins = cellOffset + 2*i; + if( end > top - sz ){ + rc = defragmentPage(pPage); + if( rc!=SQLITE_OK ) return rc; + top = get2byte(&data[hdr+5]); + assert( end + sz <= top ); + } + idx = allocateSpace(pPage, sz); + assert( idx>0 ); + assert( end <= get2byte(&data[hdr+5]) ); + pPage->nCell++; + pPage->nFree -= 2; + memcpy(&data[idx+nSkip], pCell+nSkip, sz-nSkip); + 
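+    /* Illustration (not in the original source): the loop below opens a
+    ** 2-byte hole in the cell-pointer array at offset "ins" by copying
+    ** each pointer two bytes to the right, working backwards from the end
+    ** of the array.  If the pointers starting at ins hold A, B, C, the
+    ** array reads A, A, B, C once the loop finishes, and the put2byte()
+    ** after the loop overwrites the duplicated A with "idx", the offset of
+    ** the cell body copied in by the memcpy() above.
+    */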
for(j=end-2, ptr=&data[j]; j>ins; j-=2, ptr-=2){ + ptr[0] = ptr[-2]; + ptr[1] = ptr[-1]; + } + put2byte(&data[ins], idx); + put2byte(&data[hdr+3], pPage->nCell); + pPage->idxShift = 1; +#ifndef SQLITE_OMIT_AUTOVACUUM + if( pPage->pBt->autoVacuum ){ + /* The cell may contain a pointer to an overflow page. If so, write + ** the entry for the overflow page into the pointer map. + */ + CellInfo info; + sqlite3BtreeParseCellPtr(pPage, pCell, &info); + assert( (info.nData+(pPage->intKey?0:info.nKey))==info.nPayload ); + if( (info.nData+(pPage->intKey?0:info.nKey))>info.nLocal ){ + Pgno pgnoOvfl = get4byte(&pCell[info.iOverflow]); + rc = ptrmapPut(pPage->pBt, pgnoOvfl, PTRMAP_OVERFLOW1, pPage->pgno); + if( rc!=SQLITE_OK ) return rc; + } + } +#endif + } + + return SQLITE_OK; +} + +/* +** Add a list of cells to a page. The page should be initially empty. +** The cells are guaranteed to fit on the page. +*/ +static void assemblePage( + MemPage *pPage, /* The page to be assemblied */ + int nCell, /* The number of cells to add to this page */ + u8 **apCell, /* Pointers to cell bodies */ + int *aSize /* Sizes of the cells */ +){ + int i; /* Loop counter */ + int totalSize; /* Total size of all cells */ + int hdr; /* Index of page header */ + int cellptr; /* Address of next cell pointer */ + int cellbody; /* Address of next cell body */ + u8 *data; /* Data for the page */ + + assert( pPage->nOverflow==0 ); + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + totalSize = 0; + for(i=0; inFree ); + assert( pPage->nCell==0 ); + cellptr = pPage->cellOffset; + data = pPage->aData; + hdr = pPage->hdrOffset; + put2byte(&data[hdr+3], nCell); + if( nCell ){ + cellbody = allocateSpace(pPage, totalSize); + assert( cellbody>0 ); + assert( pPage->nFree >= 2*nCell ); + pPage->nFree -= 2*nCell; + for(i=0; ipBt->usableSize ); + } + pPage->nCell = nCell; +} + +/* +** The following parameters determine how many adjacent pages get involved +** in a balancing operation. NN is the number of neighbors on either side +** of the page that participate in the balancing operation. NB is the +** total number of pages that participate, including the target page and +** NN neighbors on either side. +** +** The minimum value of NN is 1 (of course). Increasing NN above 1 +** (to 2 or 3) gives a modest improvement in SELECT and DELETE performance +** in exchange for a larger degradation in INSERT and UPDATE performance. +** The value of NN appears to give the best results overall. +*/ +#define NN 1 /* Number of neighbors on either side of pPage */ +#define NB (NN*2+1) /* Total pages involved in the balance */ + +/* Forward reference */ +static int balance(MemPage*, int); + +#ifndef SQLITE_OMIT_QUICKBALANCE +/* +** This version of balance() handles the common special case where +** a new entry is being inserted on the extreme right-end of the +** tree, in other words, when the new entry will become the largest +** entry in the tree. +** +** Instead of trying balance the 3 right-most leaf pages, just add +** a new page to the right-hand side and put the one new entry in +** that page. This leaves the right side of the tree somewhat +** unbalanced. But odds are that we will be inserting new entries +** at the end soon afterwards so the nearly empty page will quickly +** fill up. On average. +** +** pPage is the leaf page which is the right-most page in the tree. +** pParent is its parent. pPage must have a single overflow entry +** which is also the right-most entry on the page. 
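+**
+** The compiled-out sketch below (the "sketch" helper names and the
+** simplified varint encoder are illustrative additions, not part of
+** SQLite) shows what the divider cell written into pParent looks like for
+** an INTKEY (rowid) table: a 4-byte big-endian left-child page number
+** followed by the varint-encoded key of the right-most cell left on pPage.
+*/
+#if 0
+/* Write v as a big-endian base-128 varint, high bit set on every byte but
+** the last.  This matches SQLite's encoding for values below 2^56, which
+** is all this illustration needs. */
+static int sketchPutVarint(unsigned char *p, unsigned long long v){
+  unsigned char aDigit[10];
+  int n = 0, i, nOut = 0;
+  do{
+    aDigit[n++] = (unsigned char)(v & 0x7f);
+    v >>= 7;
+  }while( v>0 );
+  for(i=n-1; i>=0; i--){
+    p[nOut++] = (unsigned char)(aDigit[i] | (i>0 ? 0x80 : 0x00));
+  }
+  return nOut;
+}
+
+/* Build a divider cell: 4-byte child page number, then the key.
+** Example: pgnoChild==23 and key==5000 give the 6 bytes
+** 00 00 00 17 A7 08. */
+static int sketchBuildDivider(unsigned char *aCell, unsigned int pgnoChild,
+                              long long key){
+  aCell[0] = (unsigned char)(pgnoChild>>24);
+  aCell[1] = (unsigned char)(pgnoChild>>16);
+  aCell[2] = (unsigned char)(pgnoChild>>8);
+  aCell[3] = (unsigned char)(pgnoChild);
+  return 4 + sketchPutVarint(&aCell[4], (unsigned long long)key);
+}
+#endif
+/*
+** balance_quick() itself follows.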
+*/ +static int balance_quick(MemPage *pPage, MemPage *pParent){ + int rc; + MemPage *pNew; + Pgno pgnoNew; + u8 *pCell; + int szCell; + CellInfo info; + BtShared *pBt = pPage->pBt; + int parentIdx = pParent->nCell; /* pParent new divider cell index */ + int parentSize; /* Size of new divider cell */ + u8 parentCell[64]; /* Space for the new divider cell */ + + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + + /* Allocate a new page. Insert the overflow cell from pPage + ** into it. Then remove the overflow cell from pPage. + */ + rc = allocateBtreePage(pBt, &pNew, &pgnoNew, 0, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + pCell = pPage->aOvfl[0].pCell; + szCell = cellSizePtr(pPage, pCell); + zeroPage(pNew, pPage->aData[0]); + assemblePage(pNew, 1, &pCell, &szCell); + pPage->nOverflow = 0; + + /* Set the parent of the newly allocated page to pParent. */ + pNew->pParent = pParent; + sqlite3PagerRef(pParent->pDbPage); + + /* pPage is currently the right-child of pParent. Change this + ** so that the right-child is the new page allocated above and + ** pPage is the next-to-right child. + */ + assert( pPage->nCell>0 ); + pCell = findCell(pPage, pPage->nCell-1); + sqlite3BtreeParseCellPtr(pPage, pCell, &info); + rc = fillInCell(pParent, parentCell, 0, info.nKey, 0, 0, 0, &parentSize); + if( rc!=SQLITE_OK ){ + return rc; + } + assert( parentSize<64 ); + rc = insertCell(pParent, parentIdx, parentCell, parentSize, 0, 4); + if( rc!=SQLITE_OK ){ + return rc; + } + put4byte(findOverflowCell(pParent,parentIdx), pPage->pgno); + put4byte(&pParent->aData[pParent->hdrOffset+8], pgnoNew); + +#ifndef SQLITE_OMIT_AUTOVACUUM + /* If this is an auto-vacuum database, update the pointer map + ** with entries for the new page, and any pointer from the + ** cell on the page to an overflow page. + */ + if( pBt->autoVacuum ){ + rc = ptrmapPut(pBt, pgnoNew, PTRMAP_BTREE, pParent->pgno); + if( rc==SQLITE_OK ){ + rc = ptrmapPutOvfl(pNew, 0); + } + if( rc!=SQLITE_OK ){ + releasePage(pNew); + return rc; + } + } +#endif + + /* Release the reference to the new page and balance the parent page, + ** in case the divider cell inserted caused it to become overfull. + */ + releasePage(pNew); + return balance(pParent, 0); +} +#endif /* SQLITE_OMIT_QUICKBALANCE */ + +/* +** This routine redistributes Cells on pPage and up to NN*2 siblings +** of pPage so that all pages have about the same amount of free space. +** Usually NN siblings on either side of pPage is used in the balancing, +** though more siblings might come from one side if pPage is the first +** or last child of its parent. If pPage has fewer than 2*NN siblings +** (something which can only happen if pPage is the root page or a +** child of root) then all available siblings participate in the balancing. +** +** The number of siblings of pPage might be increased or decreased by one or +** two in an effort to keep pages nearly full but not over full. The root page +** is special and is allowed to be nearly empty. If pPage is +** the root page, then the depth of the tree might be increased +** or decreased by one, as necessary, to keep the root page from being +** overfull or completely empty. +** +** Note that when this routine is called, some of the Cells on pPage +** might not actually be stored in pPage->aData[]. This can happen +** if the page is overfull. Part of the job of this routine is to +** make sure all Cells for pPage once again fit in pPage->aData[]. 
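+**
+** With the default NN==1 this means at most NB==3 existing sibling pages
+** are read into apOld[]/apCopy[], and because a balance can add at most
+** two pages, the output arrays apNew[], pgnoNew[], cntNew[] and szNew[]
+** are sized NB+2 (five entries).  The scratch allocation made near the
+** top of the routine is laid out to match: nMaxCells cell pointers and
+** sizes, NB private page copies, five pages' worth of space for copies of
+** divider cells and, in auto-vacuum builds, one byte per cell recording
+** which sibling the cell came from.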
+** +** In the course of balancing the siblings of pPage, the parent of pPage +** might become overfull or underfull. If that happens, then this routine +** is called recursively on the parent. +** +** If this routine fails for any reason, it might leave the database +** in a corrupted state. So if this routine fails, the database should +** be rolled back. +*/ +static int balance_nonroot(MemPage *pPage){ + MemPage *pParent; /* The parent of pPage */ + BtShared *pBt; /* The whole database */ + int nCell = 0; /* Number of cells in apCell[] */ + int nMaxCells = 0; /* Allocated size of apCell, szCell, aFrom. */ + int nOld; /* Number of pages in apOld[] */ + int nNew; /* Number of pages in apNew[] */ + int nDiv; /* Number of cells in apDiv[] */ + int i, j, k; /* Loop counters */ + int idx; /* Index of pPage in pParent->aCell[] */ + int nxDiv; /* Next divider slot in pParent->aCell[] */ + int rc; /* The return code */ + int leafCorrection; /* 4 if pPage is a leaf. 0 if not */ + int leafData; /* True if pPage is a leaf of a LEAFDATA tree */ + int usableSpace; /* Bytes in pPage beyond the header */ + int pageFlags; /* Value of pPage->aData[0] */ + int subtotal; /* Subtotal of bytes in cells on one page */ + int iSpace = 0; /* First unused byte of aSpace[] */ + MemPage *apOld[NB]; /* pPage and up to two siblings */ + Pgno pgnoOld[NB]; /* Page numbers for each page in apOld[] */ + MemPage *apCopy[NB]; /* Private copies of apOld[] pages */ + MemPage *apNew[NB+2]; /* pPage and up to NB siblings after balancing */ + Pgno pgnoNew[NB+2]; /* Page numbers for each page in apNew[] */ + u8 *apDiv[NB]; /* Divider cells in pParent */ + int cntNew[NB+2]; /* Index in aCell[] of cell after i-th page */ + int szNew[NB+2]; /* Combined size of cells place on i-th page */ + u8 **apCell = 0; /* All cells begin balanced */ + int *szCell; /* Local size of all cells in apCell[] */ + u8 *aCopy[NB]; /* Space for holding data of apCopy[] */ + u8 *aSpace; /* Space to hold copies of dividers cells */ +#ifndef SQLITE_OMIT_AUTOVACUUM + u8 *aFrom = 0; +#endif + + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + + /* + ** Find the parent page. + */ + assert( pPage->isInit ); + assert( sqlite3PagerIswriteable(pPage->pDbPage) || pPage->nOverflow==1 ); + pBt = pPage->pBt; + pParent = pPage->pParent; + assert( pParent ); + if( SQLITE_OK!=(rc = sqlite3PagerWrite(pParent->pDbPage)) ){ + return rc; + } + TRACE(("BALANCE: begin page %d child of %d\n", pPage->pgno, pParent->pgno)); + +#ifndef SQLITE_OMIT_QUICKBALANCE + /* + ** A special case: If a new entry has just been inserted into a + ** table (that is, a btree with integer keys and all data at the leaves) + ** and the new entry is the right-most entry in the tree (it has the + ** largest key) then use the special balance_quick() routine for + ** balancing. balance_quick() is much faster and results in a tighter + ** packing of data in the common case. + */ + if( pPage->leaf && + pPage->intKey && + pPage->leafData && + pPage->nOverflow==1 && + pPage->aOvfl[0].idx==pPage->nCell && + pPage->pParent->pgno!=1 && + get4byte(&pParent->aData[pParent->hdrOffset+8])==pPage->pgno + ){ + /* + ** TODO: Check the siblings to the left of pPage. It may be that + ** they are not full and no new page is required. + */ + return balance_quick(pPage, pParent); + } +#endif + + if( SQLITE_OK!=(rc = sqlite3PagerWrite(pPage->pDbPage)) ){ + return rc; + } + + /* + ** Find the cell in the parent page whose left child points back + ** to pPage. The "idx" variable is the index of that cell. 
If pPage + ** is the rightmost child of pParent then set idx to pParent->nCell + */ + if( pParent->idxShift ){ + Pgno pgno; + pgno = pPage->pgno; + assert( pgno==sqlite3PagerPagenumber(pPage->pDbPage) ); + for(idx=0; idxnCell; idx++){ + if( get4byte(findCell(pParent, idx))==pgno ){ + break; + } + } + assert( idxnCell + || get4byte(&pParent->aData[pParent->hdrOffset+8])==pgno ); + }else{ + idx = pPage->idxParent; + } + + /* + ** Initialize variables so that it will be safe to jump + ** directly to balance_cleanup at any moment. + */ + nOld = nNew = 0; + sqlite3PagerRef(pParent->pDbPage); + + /* + ** Find sibling pages to pPage and the cells in pParent that divide + ** the siblings. An attempt is made to find NN siblings on either + ** side of pPage. More siblings are taken from one side, however, if + ** pPage there are fewer than NN siblings on the other side. If pParent + ** has NB or fewer children then all children of pParent are taken. + */ + nxDiv = idx - NN; + if( nxDiv + NB > pParent->nCell ){ + nxDiv = pParent->nCell - NB + 1; + } + if( nxDiv<0 ){ + nxDiv = 0; + } + nDiv = 0; + for(i=0, k=nxDiv; inCell ){ + apDiv[i] = findCell(pParent, k); + nDiv++; + assert( !pParent->leaf ); + pgnoOld[i] = get4byte(apDiv[i]); + }else if( k==pParent->nCell ){ + pgnoOld[i] = get4byte(&pParent->aData[pParent->hdrOffset+8]); + }else{ + break; + } + rc = getAndInitPage(pBt, pgnoOld[i], &apOld[i], pParent); + if( rc ) goto balance_cleanup; + apOld[i]->idxParent = k; + apCopy[i] = 0; + assert( i==nOld ); + nOld++; + nMaxCells += 1+apOld[i]->nCell+apOld[i]->nOverflow; + } + + /* Make nMaxCells a multiple of 2 in order to preserve 8-byte + ** alignment */ + nMaxCells = (nMaxCells + 1)&~1; + + /* + ** Allocate space for memory structures + */ + apCell = sqlite3_malloc( + nMaxCells*sizeof(u8*) /* apCell */ + + nMaxCells*sizeof(int) /* szCell */ + + ROUND8(sizeof(MemPage))*NB /* aCopy */ + + pBt->pageSize*(5+NB) /* aSpace */ + + (ISAUTOVACUUM ? nMaxCells : 0) /* aFrom */ + ); + if( apCell==0 ){ + rc = SQLITE_NOMEM; + goto balance_cleanup; + } + szCell = (int*)&apCell[nMaxCells]; + aCopy[0] = (u8*)&szCell[nMaxCells]; + assert( ((aCopy[0] - (u8*)apCell) & 7)==0 ); /* 8-byte alignment required */ + for(i=1; ipageSize+ROUND8(sizeof(MemPage))]; + assert( ((aCopy[i] - (u8*)apCell) & 7)==0 ); /* 8-byte alignment required */ + } + aSpace = &aCopy[NB-1][pBt->pageSize+ROUND8(sizeof(MemPage))]; + assert( ((aSpace - (u8*)apCell) & 7)==0 ); /* 8-byte alignment required */ +#ifndef SQLITE_OMIT_AUTOVACUUM + if( pBt->autoVacuum ){ + aFrom = &aSpace[5*pBt->pageSize]; + } +#endif + + /* + ** Make copies of the content of pPage and its siblings into aOld[]. + ** The rest of this function will use data from the copies rather + ** that the original pages since the original pages will be in the + ** process of being overwritten. + */ + for(i=0; iaData = (void*)&p[1]; + memcpy(p->aData, apOld[i]->aData, pBt->pageSize); + } + + /* + ** Load pointers to all cells on sibling pages and the divider cells + ** into the local apCell[] array. Make copies of the divider cells + ** into space obtained form aSpace[] and remove the the divider Cells + ** from pParent. + ** + ** If the siblings are on leaf pages, then the child pointers of the + ** divider cells are stripped from the cells before they are copied + ** into aSpace[]. In this way, all cells in apCell[] are without + ** child pointers. If siblings are not leaves, then all cell in + ** apCell[] include child pointers. Either way, all cells in apCell[] + ** are alike. 
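+  **
+  ** Concretely: when the siblings are leaves, each divider copied out of
+  ** pParent has its leading 4-byte child pointer skipped (apCell[] points
+  ** at pTemp+leafCorrection and 4 is subtracted from the size); when the
+  ** siblings are internal pages the divider keeps those 4 bytes, which are
+  ** overwritten with the right-child pointer of the sibling to its left.
+  ** For an intkey leaf-data tree the dividers in pParent are simply
+  ** dropped rather than copied, since they duplicate keys already stored
+  ** in the child cells and are regenerated from those keys later on.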
+ ** + ** leafCorrection: 4 if pPage is a leaf. 0 if pPage is not a leaf. + ** leafData: 1 if pPage holds key+data and pParent holds only keys. + */ + nCell = 0; + leafCorrection = pPage->leaf*4; + leafData = pPage->leafData && pPage->leaf; + for(i=0; inCell+pOld->nOverflow; + for(j=0; jautoVacuum ){ + int a; + aFrom[nCell] = i; + for(a=0; anOverflow; a++){ + if( pOld->aOvfl[a].pCell==apCell[nCell] ){ + aFrom[nCell] = 0xFF; + break; + } + } + } +#endif + nCell++; + } + if( ipageSize*5 ); + memcpy(pTemp, apDiv[i], sz); + apCell[nCell] = pTemp+leafCorrection; +#ifndef SQLITE_OMIT_AUTOVACUUM + if( pBt->autoVacuum ){ + aFrom[nCell] = 0xFF; + } +#endif + dropCell(pParent, nxDiv, sz); + szCell[nCell] -= leafCorrection; + assert( get4byte(pTemp)==pgnoOld[i] ); + if( !pOld->leaf ){ + assert( leafCorrection==0 ); + /* The right pointer of the child page pOld becomes the left + ** pointer of the divider cell */ + memcpy(apCell[nCell], &pOld->aData[pOld->hdrOffset+8], 4); + }else{ + assert( leafCorrection==4 ); + if( szCell[nCell]<4 ){ + /* Do not allow any cells smaller than 4 bytes. */ + szCell[nCell] = 4; + } + } + nCell++; + } + } + } + + /* + ** Figure out the number of pages needed to hold all nCell cells. + ** Store this number in "k". Also compute szNew[] which is the total + ** size of all cells on the i-th page and cntNew[] which is the index + ** in apCell[] of the cell that divides page i from page i+1. + ** cntNew[k] should equal nCell. + ** + ** Values computed by this block: + ** + ** k: The total number of sibling pages + ** szNew[i]: Spaced used on the i-th sibling page. + ** cntNew[i]: Index in apCell[] and szCell[] for the first cell to + ** the right of the i-th sibling page. + ** usableSpace: Number of bytes of space available on each sibling. + ** + */ + usableSpace = pBt->usableSize - 12 + leafCorrection; + for(subtotal=k=i=0; i usableSpace ){ + szNew[k] = subtotal - szCell[i]; + cntNew[k] = i; + if( leafData ){ i--; } + subtotal = 0; + k++; + } + } + szNew[k] = subtotal; + cntNew[k] = nCell; + k++; + + /* + ** The packing computed by the previous block is biased toward the siblings + ** on the left side. The left siblings are always nearly full, while the + ** right-most sibling might be nearly empty. This block of code attempts + ** to adjust the packing of siblings to get a better balance. + ** + ** This adjustment is more than an optimization. The packing above might + ** be so out of balance as to be illegal. For example, the right-most + ** sibling might be completely empty. This adjustment is not optional. + */ + for(i=k-1; i>0; i--){ + int szRight = szNew[i]; /* Size of sibling on the right */ + int szLeft = szNew[i-1]; /* Size of sibling on the left */ + int r; /* Index of right-most cell in left sibling */ + int d; /* Index of first cell to the left of right sibling */ + + r = cntNew[i-1] - 1; + d = r + 1 - leafData; + assert( d0) or we are the + ** a virtual root page. A virtual root page is when the real root + ** page is page 1 and we are the only child of that page. + */ + assert( cntNew[0]>0 || (pParent->pgno==1 && pParent->nCell==0) ); + + /* + ** Allocate k new pages. Reuse old pages where possible. 
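+  **
+  ** A worked example of the two blocks above (assumed numbers): with
+  ** usableSpace around 1016 bytes and ten 300-byte cells on leaf-data
+  ** pages, the greedy pass packs the cells 3+3+3+1, leaving a nearly
+  ** empty right-most sibling.  The adjustment loop then pulls one cell
+  ** from the third page into the fourth, giving a 3+3+2+2 split in which
+  ** every sibling is at least half full.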
+ */ + assert( pPage->pgno>1 ); + pageFlags = pPage->aData[0]; + for(i=0; ipDbPage); + nNew++; + if( rc ) goto balance_cleanup; + }else{ + assert( i>0 ); + rc = allocateBtreePage(pBt, &pNew, &pgnoNew[i], pgnoNew[i-1], 0); + if( rc ) goto balance_cleanup; + apNew[i] = pNew; + nNew++; + } + zeroPage(pNew, pageFlags); + } + + /* Free any old pages that were not reused as new pages. + */ + while( ii ){ + int t; + MemPage *pT; + t = pgnoNew[i]; + pT = apNew[i]; + pgnoNew[i] = pgnoNew[minI]; + apNew[i] = apNew[minI]; + pgnoNew[minI] = t; + apNew[minI] = pT; + } + } + TRACE(("BALANCE: old: %d %d %d new: %d(%d) %d(%d) %d(%d) %d(%d) %d(%d)\n", + pgnoOld[0], + nOld>=2 ? pgnoOld[1] : 0, + nOld>=3 ? pgnoOld[2] : 0, + pgnoNew[0], szNew[0], + nNew>=2 ? pgnoNew[1] : 0, nNew>=2 ? szNew[1] : 0, + nNew>=3 ? pgnoNew[2] : 0, nNew>=3 ? szNew[2] : 0, + nNew>=4 ? pgnoNew[3] : 0, nNew>=4 ? szNew[3] : 0, + nNew>=5 ? pgnoNew[4] : 0, nNew>=5 ? szNew[4] : 0)); + + /* + ** Evenly distribute the data in apCell[] across the new pages. + ** Insert divider cells into pParent as necessary. + */ + j = 0; + for(i=0; ipgno==pgnoNew[i] ); + assemblePage(pNew, cntNew[i]-j, &apCell[j], &szCell[j]); + assert( pNew->nCell>0 || (nNew==1 && cntNew[0]==0) ); + assert( pNew->nOverflow==0 ); + +#ifndef SQLITE_OMIT_AUTOVACUUM + /* If this is an auto-vacuum database, update the pointer map entries + ** that point to the siblings that were rearranged. These can be: left + ** children of cells, the right-child of the page, or overflow pages + ** pointed to by cells. + */ + if( pBt->autoVacuum ){ + for(k=j; kpgno!=pNew->pgno ){ + rc = ptrmapPutOvfl(pNew, k-j); + if( rc!=SQLITE_OK ){ + goto balance_cleanup; + } + } + } + } +#endif + + j = cntNew[i]; + + /* If the sibling page assembled above was not the right-most sibling, + ** insert a divider cell into the parent page. + */ + if( ileaf ){ + memcpy(&pNew->aData[8], pCell, 4); + pTemp = 0; + }else if( leafData ){ + /* If the tree is a leaf-data tree, and the siblings are leaves, + ** then there is no divider cell in apCell[]. Instead, the divider + ** cell consists of the integer key for the right-most cell of + ** the sibling-page assembled above only. + */ + CellInfo info; + j--; + sqlite3BtreeParseCellPtr(pNew, apCell[j], &info); + pCell = &aSpace[iSpace]; + fillInCell(pParent, pCell, 0, info.nKey, 0, 0, 0, &sz); + iSpace += sz; + assert( iSpace<=pBt->pageSize*5 ); + pTemp = 0; + }else{ + pCell -= 4; + pTemp = &aSpace[iSpace]; + iSpace += sz; + assert( iSpace<=pBt->pageSize*5 ); + /* Obscure case for non-leaf-data trees: If the cell at pCell was + ** previously stored on a leaf node, and it's reported size was 4 + ** bytes, then it may actually be smaller than this + ** (see sqlite3BtreeParseCellPtr(), 4 bytes is the minimum size of + ** any cell). But it's important to pass the correct size to + ** insertCell(), so reparse the cell now. + ** + ** Note that this can never happen in an SQLite data file, as all + ** cells are at least 4 bytes. It only happens in b-trees used + ** to evaluate "IN (SELECT ...)" and similar clauses. 
+ */ + if( szCell[j]==4 ){ + assert(leafCorrection==4); + sz = cellSizePtr(pParent, pCell); + } + } + rc = insertCell(pParent, nxDiv, pCell, sz, pTemp, 4); + if( rc!=SQLITE_OK ) goto balance_cleanup; + put4byte(findOverflowCell(pParent,nxDiv), pNew->pgno); +#ifndef SQLITE_OMIT_AUTOVACUUM + /* If this is an auto-vacuum database, and not a leaf-data tree, + ** then update the pointer map with an entry for the overflow page + ** that the cell just inserted points to (if any). + */ + if( pBt->autoVacuum && !leafData ){ + rc = ptrmapPutOvfl(pParent, nxDiv); + if( rc!=SQLITE_OK ){ + goto balance_cleanup; + } + } +#endif + j++; + nxDiv++; + } + } + assert( j==nCell ); + assert( nOld>0 ); + assert( nNew>0 ); + if( (pageFlags & PTF_LEAF)==0 ){ + memcpy(&apNew[nNew-1]->aData[8], &apCopy[nOld-1]->aData[8], 4); + } + if( nxDiv==pParent->nCell+pParent->nOverflow ){ + /* Right-most sibling is the right-most child of pParent */ + put4byte(&pParent->aData[pParent->hdrOffset+8], pgnoNew[nNew-1]); + }else{ + /* Right-most sibling is the left child of the first entry in pParent + ** past the right-most divider entry */ + put4byte(findOverflowCell(pParent, nxDiv), pgnoNew[nNew-1]); + } + + /* + ** Reparent children of all cells. + */ + for(i=0; iisInit ); + rc = balance(pParent, 0); + + /* + ** Cleanup before returning. + */ +balance_cleanup: + sqlite3_free(apCell); + for(i=0; ipgno, nOld, nNew, nCell)); + return rc; +} + +/* +** This routine is called for the root page of a btree when the root +** page contains no cells. This is an opportunity to make the tree +** shallower by one level. +*/ +static int balance_shallower(MemPage *pPage){ + MemPage *pChild; /* The only child page of pPage */ + Pgno pgnoChild; /* Page number for pChild */ + int rc = SQLITE_OK; /* Return code from subprocedures */ + BtShared *pBt; /* The main BTree structure */ + int mxCellPerPage; /* Maximum number of cells per page */ + u8 **apCell; /* All cells from pages being balanced */ + int *szCell; /* Local size of all cells */ + + assert( pPage->pParent==0 ); + assert( pPage->nCell==0 ); + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + pBt = pPage->pBt; + mxCellPerPage = MX_CELL(pBt); + apCell = sqlite3_malloc( mxCellPerPage*(sizeof(u8*)+sizeof(int)) ); + if( apCell==0 ) return SQLITE_NOMEM; + szCell = (int*)&apCell[mxCellPerPage]; + if( pPage->leaf ){ + /* The table is completely empty */ + TRACE(("BALANCE: empty table %d\n", pPage->pgno)); + }else{ + /* The root page is empty but has one child. Transfer the + ** information from that one child into the root page if it + ** will fit. This reduces the depth of the tree by one. + ** + ** If the root page is page 1, it has less space available than + ** its child (due to the 100 byte header that occurs at the beginning + ** of the database fle), so it might not be able to hold all of the + ** information currently contained in the child. If this is the + ** case, then do not do the transfer. Leave page 1 empty except + ** for the right-pointer to the child page. The child page becomes + ** the virtual root of the tree. 
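+    ** For example, with a 1024-byte page and no reserved space, page 1
+    ** offers only 1024-100 = 924 bytes to the b-tree layer, which is why
+    ** the code below only performs the copy when pChild->nFree is at
+    ** least 100: the child's content must still fit after giving up 100
+    ** bytes to the file header.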
+ */ + pgnoChild = get4byte(&pPage->aData[pPage->hdrOffset+8]); + assert( pgnoChild>0 ); + assert( pgnoChild<=sqlite3PagerPagecount(pPage->pBt->pPager) ); + rc = sqlite3BtreeGetPage(pPage->pBt, pgnoChild, &pChild, 0); + if( rc ) goto end_shallow_balance; + if( pPage->pgno==1 ){ + rc = sqlite3BtreeInitPage(pChild, pPage); + if( rc ) goto end_shallow_balance; + assert( pChild->nOverflow==0 ); + if( pChild->nFree>=100 ){ + /* The child information will fit on the root page, so do the + ** copy */ + int i; + zeroPage(pPage, pChild->aData[0]); + for(i=0; inCell; i++){ + apCell[i] = findCell(pChild,i); + szCell[i] = cellSizePtr(pChild, apCell[i]); + } + assemblePage(pPage, pChild->nCell, apCell, szCell); + /* Copy the right-pointer of the child to the parent. */ + put4byte(&pPage->aData[pPage->hdrOffset+8], + get4byte(&pChild->aData[pChild->hdrOffset+8])); + freePage(pChild); + TRACE(("BALANCE: child %d transfer to page 1\n", pChild->pgno)); + }else{ + /* The child has more information that will fit on the root. + ** The tree is already balanced. Do nothing. */ + TRACE(("BALANCE: child %d will not fit on page 1\n", pChild->pgno)); + } + }else{ + memcpy(pPage->aData, pChild->aData, pPage->pBt->usableSize); + pPage->isInit = 0; + pPage->pParent = 0; + rc = sqlite3BtreeInitPage(pPage, 0); + assert( rc==SQLITE_OK ); + freePage(pChild); + TRACE(("BALANCE: transfer child %d into root %d\n", + pChild->pgno, pPage->pgno)); + } + rc = reparentChildPages(pPage); + assert( pPage->nOverflow==0 ); +#ifndef SQLITE_OMIT_AUTOVACUUM + if( pBt->autoVacuum ){ + int i; + for(i=0; inCell; i++){ + rc = ptrmapPutOvfl(pPage, i); + if( rc!=SQLITE_OK ){ + goto end_shallow_balance; + } + } + } +#endif + releasePage(pChild); + } +end_shallow_balance: + sqlite3_free(apCell); + return rc; +} + + +/* +** The root page is overfull +** +** When this happens, Create a new child page and copy the +** contents of the root into the child. Then make the root +** page an empty page with rightChild pointing to the new +** child. Finally, call balance_internal() on the new child +** to cause it to split. 
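+**
+** The copy is done in two pieces: the page header and cell-pointer array
+** are copied from offset hdr of the root (100 when the root is page 1) to
+** offset 0 of the child, while the cell-content area is copied to the
+** same byte offsets it occupied in the root.  Because the 2-byte cell
+** pointers store absolute offsets into the content area, they remain
+** valid in the child without any adjustment.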
+*/ +static int balance_deeper(MemPage *pPage){ + int rc; /* Return value from subprocedures */ + MemPage *pChild; /* Pointer to a new child page */ + Pgno pgnoChild; /* Page number of the new child page */ + BtShared *pBt; /* The BTree */ + int usableSize; /* Total usable size of a page */ + u8 *data; /* Content of the parent page */ + u8 *cdata; /* Content of the child page */ + int hdr; /* Offset to page header in parent */ + int brk; /* Offset to content of first cell in parent */ + + assert( pPage->pParent==0 ); + assert( pPage->nOverflow>0 ); + pBt = pPage->pBt; + assert( sqlite3_mutex_held(pBt->mutex) ); + rc = allocateBtreePage(pBt, &pChild, &pgnoChild, pPage->pgno, 0); + if( rc ) return rc; + assert( sqlite3PagerIswriteable(pChild->pDbPage) ); + usableSize = pBt->usableSize; + data = pPage->aData; + hdr = pPage->hdrOffset; + brk = get2byte(&data[hdr+5]); + cdata = pChild->aData; + memcpy(cdata, &data[hdr], pPage->cellOffset+2*pPage->nCell-hdr); + memcpy(&cdata[brk], &data[brk], usableSize-brk); + assert( pChild->isInit==0 ); + rc = sqlite3BtreeInitPage(pChild, pPage); + if( rc ) goto balancedeeper_out; + memcpy(pChild->aOvfl, pPage->aOvfl, pPage->nOverflow*sizeof(pPage->aOvfl[0])); + pChild->nOverflow = pPage->nOverflow; + if( pChild->nOverflow ){ + pChild->nFree = 0; + } + assert( pChild->nCell==pPage->nCell ); + zeroPage(pPage, pChild->aData[0] & ~PTF_LEAF); + put4byte(&pPage->aData[pPage->hdrOffset+8], pgnoChild); + TRACE(("BALANCE: copy root %d into %d\n", pPage->pgno, pChild->pgno)); +#ifndef SQLITE_OMIT_AUTOVACUUM + if( pBt->autoVacuum ){ + int i; + rc = ptrmapPut(pBt, pChild->pgno, PTRMAP_BTREE, pPage->pgno); + if( rc ) goto balancedeeper_out; + for(i=0; inCell; i++){ + rc = ptrmapPutOvfl(pChild, i); + if( rc!=SQLITE_OK ){ + return rc; + } + } + } +#endif + rc = balance_nonroot(pChild); + +balancedeeper_out: + releasePage(pChild); + return rc; +} + +/* +** Decide if the page pPage needs to be balanced. If balancing is +** required, call the appropriate balancing routine. +*/ +static int balance(MemPage *pPage, int insert){ + int rc = SQLITE_OK; + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + if( pPage->pParent==0 ){ + rc = sqlite3PagerWrite(pPage->pDbPage); + if( rc==SQLITE_OK && pPage->nOverflow>0 ){ + rc = balance_deeper(pPage); + } + if( rc==SQLITE_OK && pPage->nCell==0 ){ + rc = balance_shallower(pPage); + } + }else{ + if( pPage->nOverflow>0 || + (!insert && pPage->nFree>pPage->pBt->usableSize*2/3) ){ + rc = balance_nonroot(pPage); + } + } + return rc; +} + +/* +** This routine checks all cursors that point to table pgnoRoot. +** If any of those cursors were opened with wrFlag==0 in a different +** database connection (a database connection that shares the pager +** cache with the current connection) and that other connection +** is not in the ReadUncommmitted state, then this routine returns +** SQLITE_LOCKED. +** +** In addition to checking for read-locks (where a read-lock +** means a cursor opened with wrFlag==0) this routine also moves +** all write cursors so that they are pointing to the +** first Cell on the root page. This is necessary because an insert +** or delete might change the number of cells on a page or delete +** a page entirely and we do not want to leave any cursors +** pointing to non-existant pages or cells. 
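+**
+** The compiled-out fragment below (illustrative only; it uses the public
+** API rather than this routine directly, and omits all error handling)
+** sketches roughly how such a conflict looks from the application's point
+** of view in a shared-cache configuration.
+*/
+#if 0
+#include "sqlite3.h"
+static void sketchSharedCacheConflict(void){
+  sqlite3 *dbA, *dbB;
+  sqlite3_stmt *pStmt;
+
+  sqlite3_enable_shared_cache(1);
+  sqlite3_open("test.db", &dbA);
+  sqlite3_open("test.db", &dbB);       /* second connection, same cache */
+
+  /* dbA keeps a read cursor open on table t1 ... */
+  sqlite3_prepare_v2(dbA, "SELECT * FROM t1", -1, &pStmt, 0);
+  sqlite3_step(pStmt);
+
+  /* ... so an attempt to write t1 through dbB is refused with
+  ** SQLITE_LOCKED, unless dbB has set "PRAGMA read_uncommitted=1". */
+  sqlite3_exec(dbB, "INSERT INTO t1 VALUES(1)", 0, 0, 0);
+
+  sqlite3_finalize(pStmt);
+  sqlite3_close(dbA);
+  sqlite3_close(dbB);
+}
+#endif
+/*
+** checkReadLocks() itself follows.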
+*/ +static int checkReadLocks(Btree *pBtree, Pgno pgnoRoot, BtCursor *pExclude){ + BtCursor *p; + BtShared *pBt = pBtree->pBt; + sqlite3 *db = pBtree->pSqlite; + assert( sqlite3BtreeHoldsMutex(pBtree) ); + for(p=pBt->pCursor; p; p=p->pNext){ + if( p==pExclude ) continue; + if( p->eState!=CURSOR_VALID ) continue; + if( p->pgnoRoot!=pgnoRoot ) continue; + if( p->wrFlag==0 ){ + sqlite3 *dbOther = p->pBtree->pSqlite; + if( dbOther==0 || + (dbOther!=db && (dbOther->flags & SQLITE_ReadUncommitted)==0) ){ + return SQLITE_LOCKED; + } + }else if( p->pPage->pgno!=p->pgnoRoot ){ + moveToRoot(p); + } + } + return SQLITE_OK; +} + +/* +** Insert a new record into the BTree. The key is given by (pKey,nKey) +** and the data is given by (pData,nData). The cursor is used only to +** define what table the record should be inserted into. The cursor +** is left pointing at a random location. +** +** For an INTKEY table, only the nKey value of the key is used. pKey is +** ignored. For a ZERODATA table, the pData and nData are both ignored. +*/ +int sqlite3BtreeInsert( + BtCursor *pCur, /* Insert data into the table of this cursor */ + const void *pKey, i64 nKey, /* The key of the new record */ + const void *pData, int nData, /* The data of the new record */ + int nZero, /* Number of extra 0 bytes to append to data */ + int appendBias /* True if this is likely an append */ +){ + int rc; + int loc; + int szNew; + MemPage *pPage; + Btree *p = pCur->pBtree; + BtShared *pBt = p->pBt; + unsigned char *oldCell; + unsigned char *newCell = 0; + + assert( cursorHoldsMutex(pCur) ); + if( pBt->inTransaction!=TRANS_WRITE ){ + /* Must start a transaction before doing an insert */ + rc = pBt->readOnly ? SQLITE_READONLY : SQLITE_ERROR; + return rc; + } + assert( !pBt->readOnly ); + if( !pCur->wrFlag ){ + return SQLITE_PERM; /* Cursor not open for writing */ + } + if( checkReadLocks(pCur->pBtree, pCur->pgnoRoot, pCur) ){ + return SQLITE_LOCKED; /* The table pCur points to has a read lock */ + } + if( pCur->eState==CURSOR_FAULT ){ + return pCur->skip; + } + + /* Save the positions of any other cursors open on this table */ + clearCursorPosition(pCur); + if( + SQLITE_OK!=(rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur)) || + SQLITE_OK!=(rc = sqlite3BtreeMoveto(pCur, pKey, nKey, appendBias, &loc)) + ){ + return rc; + } + + pPage = pCur->pPage; + assert( pPage->intKey || nKey>=0 ); + assert( pPage->leaf || !pPage->leafData ); + TRACE(("INSERT: table=%d nkey=%lld ndata=%d page=%d %s\n", + pCur->pgnoRoot, nKey, nData, pPage->pgno, + loc==0 ? 
"overwrite" : "new entry")); + assert( pPage->isInit ); + newCell = sqlite3_malloc( MX_CELL_SIZE(pBt) ); + if( newCell==0 ) return SQLITE_NOMEM; + rc = fillInCell(pPage, newCell, pKey, nKey, pData, nData, nZero, &szNew); + if( rc ) goto end_insert; + assert( szNew==cellSizePtr(pPage, newCell) ); + assert( szNew<=MX_CELL_SIZE(pBt) ); + if( loc==0 && CURSOR_VALID==pCur->eState ){ + int szOld; + assert( pCur->idx>=0 && pCur->idxnCell ); + rc = sqlite3PagerWrite(pPage->pDbPage); + if( rc ){ + goto end_insert; + } + oldCell = findCell(pPage, pCur->idx); + if( !pPage->leaf ){ + memcpy(newCell, oldCell, 4); + } + szOld = cellSizePtr(pPage, oldCell); + rc = clearCell(pPage, oldCell); + if( rc ) goto end_insert; + dropCell(pPage, pCur->idx, szOld); + }else if( loc<0 && pPage->nCell>0 ){ + assert( pPage->leaf ); + pCur->idx++; + pCur->info.nSize = 0; + }else{ + assert( pPage->leaf ); + } + rc = insertCell(pPage, pCur->idx, newCell, szNew, 0, 0); + if( rc!=SQLITE_OK ) goto end_insert; + rc = balance(pPage, 1); + /* sqlite3BtreePageDump(pCur->pBt, pCur->pgnoRoot, 1); */ + /* fflush(stdout); */ + if( rc==SQLITE_OK ){ + moveToRoot(pCur); + } +end_insert: + sqlite3_free(newCell); + return rc; +} + +/* +** Delete the entry that the cursor is pointing to. The cursor +** is left pointing at a random location. +*/ +int sqlite3BtreeDelete(BtCursor *pCur){ + MemPage *pPage = pCur->pPage; + unsigned char *pCell; + int rc; + Pgno pgnoChild = 0; + Btree *p = pCur->pBtree; + BtShared *pBt = p->pBt; + + assert( cursorHoldsMutex(pCur) ); + assert( pPage->isInit ); + if( pBt->inTransaction!=TRANS_WRITE ){ + /* Must start a transaction before doing a delete */ + rc = pBt->readOnly ? SQLITE_READONLY : SQLITE_ERROR; + return rc; + } + assert( !pBt->readOnly ); + if( pCur->eState==CURSOR_FAULT ){ + return pCur->skip; + } + if( pCur->idx >= pPage->nCell ){ + return SQLITE_ERROR; /* The cursor is not pointing to anything */ + } + if( !pCur->wrFlag ){ + return SQLITE_PERM; /* Did not open this cursor for writing */ + } + if( checkReadLocks(pCur->pBtree, pCur->pgnoRoot, pCur) ){ + return SQLITE_LOCKED; /* The table pCur points to has a read lock */ + } + + /* Restore the current cursor position (a no-op if the cursor is not in + ** CURSOR_REQUIRESEEK state) and save the positions of any other cursors + ** open on the same table. Then call sqlite3PagerWrite() on the page + ** that the entry will be deleted from. + */ + if( + (rc = restoreOrClearCursorPosition(pCur))!=0 || + (rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur))!=0 || + (rc = sqlite3PagerWrite(pPage->pDbPage))!=0 + ){ + return rc; + } + + /* Locate the cell within it's page and leave pCell pointing to the + ** data. The clearCell() call frees any overflow pages associated with the + ** cell. The cell itself is still intact. + */ + pCell = findCell(pPage, pCur->idx); + if( !pPage->leaf ){ + pgnoChild = get4byte(pCell); + } + rc = clearCell(pPage, pCell); + if( rc ){ + return rc; + } + + if( !pPage->leaf ){ + /* + ** The entry we are about to delete is not a leaf so if we do not + ** do something we will leave a hole on an internal page. + ** We have to fill the hole by moving in a cell from a leaf. The + ** next Cell after the one to be deleted is guaranteed to exist and + ** to be a leaf so we can use it. + */ + BtCursor leafCur; + unsigned char *pNext; + int szNext; /* The compiler warning is wrong: szNext is always + ** initialized before use. Adding an extra initialization + ** to silence the compiler slows down the code. 
*/ + int notUsed; + unsigned char *tempCell = 0; + assert( !pPage->leafData ); + sqlite3BtreeGetTempCursor(pCur, &leafCur); + rc = sqlite3BtreeNext(&leafCur, ¬Used); + if( rc==SQLITE_OK ){ + rc = sqlite3PagerWrite(leafCur.pPage->pDbPage); + } + if( rc==SQLITE_OK ){ + TRACE(("DELETE: table=%d delete internal from %d replace from leaf %d\n", + pCur->pgnoRoot, pPage->pgno, leafCur.pPage->pgno)); + dropCell(pPage, pCur->idx, cellSizePtr(pPage, pCell)); + pNext = findCell(leafCur.pPage, leafCur.idx); + szNext = cellSizePtr(leafCur.pPage, pNext); + assert( MX_CELL_SIZE(pBt)>=szNext+4 ); + tempCell = sqlite3_malloc( MX_CELL_SIZE(pBt) ); + if( tempCell==0 ){ + rc = SQLITE_NOMEM; + } + } + if( rc==SQLITE_OK ){ + rc = insertCell(pPage, pCur->idx, pNext-4, szNext+4, tempCell, 0); + } + if( rc==SQLITE_OK ){ + put4byte(findOverflowCell(pPage, pCur->idx), pgnoChild); + rc = balance(pPage, 0); + } + if( rc==SQLITE_OK ){ + dropCell(leafCur.pPage, leafCur.idx, szNext); + rc = balance(leafCur.pPage, 0); + } + sqlite3_free(tempCell); + sqlite3BtreeReleaseTempCursor(&leafCur); + }else{ + TRACE(("DELETE: table=%d delete from leaf %d\n", + pCur->pgnoRoot, pPage->pgno)); + dropCell(pPage, pCur->idx, cellSizePtr(pPage, pCell)); + rc = balance(pPage, 0); + } + if( rc==SQLITE_OK ){ + moveToRoot(pCur); + } + return rc; +} + +/* +** Create a new BTree table. Write into *piTable the page +** number for the root page of the new table. +** +** The type of type is determined by the flags parameter. Only the +** following values of flags are currently in use. Other values for +** flags might not work: +** +** BTREE_INTKEY|BTREE_LEAFDATA Used for SQL tables with rowid keys +** BTREE_ZERODATA Used for SQL indices +*/ +static int btreeCreateTable(Btree *p, int *piTable, int flags){ + BtShared *pBt = p->pBt; + MemPage *pRoot; + Pgno pgnoRoot; + int rc; + + assert( sqlite3BtreeHoldsMutex(p) ); + if( pBt->inTransaction!=TRANS_WRITE ){ + /* Must start a transaction first */ + rc = pBt->readOnly ? SQLITE_READONLY : SQLITE_ERROR; + return rc; + } + assert( !pBt->readOnly ); + +#ifdef SQLITE_OMIT_AUTOVACUUM + rc = allocateBtreePage(pBt, &pRoot, &pgnoRoot, 1, 0); + if( rc ){ + return rc; + } +#else + if( pBt->autoVacuum ){ + Pgno pgnoMove; /* Move a page here to make room for the root-page */ + MemPage *pPageMove; /* The page to move to. */ + + /* Creating a new table may probably require moving an existing database + ** to make room for the new tables root page. In case this page turns + ** out to be an overflow page, delete all overflow page-map caches + ** held by open cursors. + */ + invalidateAllOverflowCache(pBt); + + /* Read the value of meta[3] from the database to determine where the + ** root page of the new table should go. meta[3] is the largest root-page + ** created so far, so the new root-page is (meta[3]+1). + */ + rc = sqlite3BtreeGetMeta(p, 4, &pgnoRoot); + if( rc!=SQLITE_OK ){ + return rc; + } + pgnoRoot++; + + /* The new root-page may not be allocated on a pointer-map page, or the + ** PENDING_BYTE page. + */ + if( pgnoRoot==PTRMAP_PAGENO(pBt, pgnoRoot) || + pgnoRoot==PENDING_BYTE_PAGE(pBt) ){ + pgnoRoot++; + } + assert( pgnoRoot>=3 ); + + /* Allocate a page. The page that currently resides at pgnoRoot will + ** be moved to the allocated page (unless the allocated page happens + ** to reside at pgnoRoot). 
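+    ** As an example (illustrative numbers): if the largest existing root
+    ** page recorded in the header is 5, the new root wants to be page 6;
+    ** if page 6 happens to be a pointer-map page or the PENDING_BYTE page,
+    ** the candidate becomes 7 instead.  allocateBtreePage() is then asked
+    ** for exactly that page, and if it hands back some other page
+    ** pgnoMove, the b-tree page currently stored at the candidate location
+    ** is relocated to pgnoMove so the candidate page number can be reused
+    ** for the new root.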
+ */ + rc = allocateBtreePage(pBt, &pPageMove, &pgnoMove, pgnoRoot, 1); + if( rc!=SQLITE_OK ){ + return rc; + } + + if( pgnoMove!=pgnoRoot ){ + /* pgnoRoot is the page that will be used for the root-page of + ** the new table (assuming an error did not occur). But we were + ** allocated pgnoMove. If required (i.e. if it was not allocated + ** by extending the file), the current page at position pgnoMove + ** is already journaled. + */ + u8 eType; + Pgno iPtrPage; + + releasePage(pPageMove); + + /* Move the page currently at pgnoRoot to pgnoMove. */ + rc = sqlite3BtreeGetPage(pBt, pgnoRoot, &pRoot, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + rc = ptrmapGet(pBt, pgnoRoot, &eType, &iPtrPage); + if( rc!=SQLITE_OK || eType==PTRMAP_ROOTPAGE || eType==PTRMAP_FREEPAGE ){ + releasePage(pRoot); + return rc; + } + assert( eType!=PTRMAP_ROOTPAGE ); + assert( eType!=PTRMAP_FREEPAGE ); + rc = sqlite3PagerWrite(pRoot->pDbPage); + if( rc!=SQLITE_OK ){ + releasePage(pRoot); + return rc; + } + rc = relocatePage(pBt, pRoot, eType, iPtrPage, pgnoMove); + releasePage(pRoot); + + /* Obtain the page at pgnoRoot */ + if( rc!=SQLITE_OK ){ + return rc; + } + rc = sqlite3BtreeGetPage(pBt, pgnoRoot, &pRoot, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + rc = sqlite3PagerWrite(pRoot->pDbPage); + if( rc!=SQLITE_OK ){ + releasePage(pRoot); + return rc; + } + }else{ + pRoot = pPageMove; + } + + /* Update the pointer-map and meta-data with the new root-page number. */ + rc = ptrmapPut(pBt, pgnoRoot, PTRMAP_ROOTPAGE, 0); + if( rc ){ + releasePage(pRoot); + return rc; + } + rc = sqlite3BtreeUpdateMeta(p, 4, pgnoRoot); + if( rc ){ + releasePage(pRoot); + return rc; + } + + }else{ + rc = allocateBtreePage(pBt, &pRoot, &pgnoRoot, 1, 0); + if( rc ) return rc; + } +#endif + assert( sqlite3PagerIswriteable(pRoot->pDbPage) ); + zeroPage(pRoot, flags | PTF_LEAF); + sqlite3PagerUnref(pRoot->pDbPage); + *piTable = (int)pgnoRoot; + return SQLITE_OK; +} +int sqlite3BtreeCreateTable(Btree *p, int *piTable, int flags){ + int rc; + sqlite3BtreeEnter(p); + rc = btreeCreateTable(p, piTable, flags); + sqlite3BtreeLeave(p); + return rc; +} + +/* +** Erase the given database page and all its children. Return +** the page to the freelist. +*/ +static int clearDatabasePage( + BtShared *pBt, /* The BTree that contains the table */ + Pgno pgno, /* Page number to clear */ + MemPage *pParent, /* Parent page. NULL for the root */ + int freePageFlag /* Deallocate page if true */ +){ + MemPage *pPage = 0; + int rc; + unsigned char *pCell; + int i; + + assert( sqlite3_mutex_held(pBt->mutex) ); + if( pgno>sqlite3PagerPagecount(pBt->pPager) ){ + return SQLITE_CORRUPT_BKPT; + } + + rc = getAndInitPage(pBt, pgno, &pPage, pParent); + if( rc ) goto cleardatabasepage_out; + for(i=0; inCell; i++){ + pCell = findCell(pPage, i); + if( !pPage->leaf ){ + rc = clearDatabasePage(pBt, get4byte(pCell), pPage->pParent, 1); + if( rc ) goto cleardatabasepage_out; + } + rc = clearCell(pPage, pCell); + if( rc ) goto cleardatabasepage_out; + } + if( !pPage->leaf ){ + rc = clearDatabasePage(pBt, get4byte(&pPage->aData[8]), pPage->pParent, 1); + if( rc ) goto cleardatabasepage_out; + } + if( freePageFlag ){ + rc = freePage(pPage); + }else if( (rc = sqlite3PagerWrite(pPage->pDbPage))==0 ){ + zeroPage(pPage, pPage->aData[0] | PTF_LEAF); + } + +cleardatabasepage_out: + releasePage(pPage); + return rc; +} + +/* +** Delete all information from a single table in the database. iTable is +** the page number of the root of the table. 
After this routine returns, +** the root page is empty, but still exists. +** +** This routine will fail with SQLITE_LOCKED if there are any open +** read cursors on the table. Open write cursors are moved to the +** root of the table. +*/ +int sqlite3BtreeClearTable(Btree *p, int iTable){ + int rc; + BtShared *pBt = p->pBt; + sqlite3BtreeEnter(p); + if( p->inTrans!=TRANS_WRITE ){ + rc = pBt->readOnly ? SQLITE_READONLY : SQLITE_ERROR; + }else if( (rc = checkReadLocks(p, iTable, 0))!=SQLITE_OK ){ + /* nothing to do */ + }else if( SQLITE_OK!=(rc = saveAllCursors(pBt, iTable, 0)) ){ + /* nothing to do */ + }else{ + rc = clearDatabasePage(pBt, (Pgno)iTable, 0, 0); + } + sqlite3BtreeLeave(p); + return rc; +} + +/* +** Erase all information in a table and add the root of the table to +** the freelist. Except, the root of the principle table (the one on +** page 1) is never added to the freelist. +** +** This routine will fail with SQLITE_LOCKED if there are any open +** cursors on the table. +** +** If AUTOVACUUM is enabled and the page at iTable is not the last +** root page in the database file, then the last root page +** in the database file is moved into the slot formerly occupied by +** iTable and that last slot formerly occupied by the last root page +** is added to the freelist instead of iTable. In this say, all +** root pages are kept at the beginning of the database file, which +** is necessary for AUTOVACUUM to work right. *piMoved is set to the +** page number that used to be the last root page in the file before +** the move. If no page gets moved, *piMoved is set to 0. +** The last root page is recorded in meta[3] and the value of +** meta[3] is updated by this procedure. +*/ +static int btreeDropTable(Btree *p, int iTable, int *piMoved){ + int rc; + MemPage *pPage = 0; + BtShared *pBt = p->pBt; + + assert( sqlite3BtreeHoldsMutex(p) ); + if( p->inTrans!=TRANS_WRITE ){ + return pBt->readOnly ? SQLITE_READONLY : SQLITE_ERROR; + } + + /* It is illegal to drop a table if any cursors are open on the + ** database. This is because in auto-vacuum mode the backend may + ** need to move another root-page to fill a gap left by the deleted + ** root page. If an open cursor was using this page a problem would + ** occur. + */ + if( pBt->pCursor ){ + return SQLITE_LOCKED; + } + + rc = sqlite3BtreeGetPage(pBt, (Pgno)iTable, &pPage, 0); + if( rc ) return rc; + rc = sqlite3BtreeClearTable(p, iTable); + if( rc ){ + releasePage(pPage); + return rc; + } + + *piMoved = 0; + + if( iTable>1 ){ +#ifdef SQLITE_OMIT_AUTOVACUUM + rc = freePage(pPage); + releasePage(pPage); +#else + if( pBt->autoVacuum ){ + Pgno maxRootPgno; + rc = sqlite3BtreeGetMeta(p, 4, &maxRootPgno); + if( rc!=SQLITE_OK ){ + releasePage(pPage); + return rc; + } + + if( iTable==maxRootPgno ){ + /* If the table being dropped is the table with the largest root-page + ** number in the database, put the root page on the free list. + */ + rc = freePage(pPage); + releasePage(pPage); + if( rc!=SQLITE_OK ){ + return rc; + } + }else{ + /* The table being dropped does not have the largest root-page + ** number in the database. So move the page that does into the + ** gap left by the deleted root-page. 
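+        ** For example (illustrative numbers): dropping the table rooted
+        ** at page 5 when the largest root page is 9 relocates the tree
+        ** rooted at 9 into page 5, frees page 9, and reports *piMoved==9
+        ** so that the layer above can fix up any stored root-page numbers.
+        ** The recorded max-root value then drops to 8, skipping over the
+        ** PENDING_BYTE and pointer-map pages if either is in the way.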
+ */ + MemPage *pMove; + releasePage(pPage); + rc = sqlite3BtreeGetPage(pBt, maxRootPgno, &pMove, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + rc = relocatePage(pBt, pMove, PTRMAP_ROOTPAGE, 0, iTable); + releasePage(pMove); + if( rc!=SQLITE_OK ){ + return rc; + } + rc = sqlite3BtreeGetPage(pBt, maxRootPgno, &pMove, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + rc = freePage(pMove); + releasePage(pMove); + if( rc!=SQLITE_OK ){ + return rc; + } + *piMoved = maxRootPgno; + } + + /* Set the new 'max-root-page' value in the database header. This + ** is the old value less one, less one more if that happens to + ** be a root-page number, less one again if that is the + ** PENDING_BYTE_PAGE. + */ + maxRootPgno--; + if( maxRootPgno==PENDING_BYTE_PAGE(pBt) ){ + maxRootPgno--; + } + if( maxRootPgno==PTRMAP_PAGENO(pBt, maxRootPgno) ){ + maxRootPgno--; + } + assert( maxRootPgno!=PENDING_BYTE_PAGE(pBt) ); + + rc = sqlite3BtreeUpdateMeta(p, 4, maxRootPgno); + }else{ + rc = freePage(pPage); + releasePage(pPage); + } +#endif + }else{ + /* If sqlite3BtreeDropTable was called on page 1. */ + zeroPage(pPage, PTF_INTKEY|PTF_LEAF ); + releasePage(pPage); + } + return rc; +} +int sqlite3BtreeDropTable(Btree *p, int iTable, int *piMoved){ + int rc; + sqlite3BtreeEnter(p); + rc = btreeDropTable(p, iTable, piMoved); + sqlite3BtreeLeave(p); + return rc; +} + + +/* +** Read the meta-information out of a database file. Meta[0] +** is the number of free pages currently in the database. Meta[1] +** through meta[15] are available for use by higher layers. Meta[0] +** is read-only, the others are read/write. +** +** The schema layer numbers meta values differently. At the schema +** layer (and the SetCookie and ReadCookie opcodes) the number of +** free pages is not visible. So Cookie[0] is the same as Meta[1]. +*/ +int sqlite3BtreeGetMeta(Btree *p, int idx, u32 *pMeta){ + DbPage *pDbPage; + int rc; + unsigned char *pP1; + BtShared *pBt = p->pBt; + + sqlite3BtreeEnter(p); + + /* Reading a meta-data value requires a read-lock on page 1 (and hence + ** the sqlite_master table. We grab this lock regardless of whether or + ** not the SQLITE_ReadUncommitted flag is set (the table rooted at page + ** 1 is treated as a special case by queryTableLock() and lockTable()). + */ + rc = queryTableLock(p, 1, READ_LOCK); + if( rc!=SQLITE_OK ){ + sqlite3BtreeLeave(p); + return rc; + } + + assert( idx>=0 && idx<=15 ); + rc = sqlite3PagerGet(pBt->pPager, 1, &pDbPage); + if( rc ){ + sqlite3BtreeLeave(p); + return rc; + } + pP1 = (unsigned char *)sqlite3PagerGetData(pDbPage); + *pMeta = get4byte(&pP1[36 + idx*4]); + sqlite3PagerUnref(pDbPage); + + /* If autovacuumed is disabled in this build but we are trying to + ** access an autovacuumed database, then make the database readonly. + */ +#ifdef SQLITE_OMIT_AUTOVACUUM + if( idx==4 && *pMeta>0 ) pBt->readOnly = 1; +#endif + + /* Grab the read-lock on page 1. */ + rc = lockTable(p, 1, READ_LOCK); + sqlite3BtreeLeave(p); + return rc; +} + +/* +** Write meta-information back into the database. Meta[0] is +** read-only and may not be written. +*/ +int sqlite3BtreeUpdateMeta(Btree *p, int idx, u32 iMeta){ + BtShared *pBt = p->pBt; + unsigned char *pP1; + int rc; + assert( idx>=1 && idx<=15 ); + sqlite3BtreeEnter(p); + if( p->inTrans!=TRANS_WRITE ){ + rc = pBt->readOnly ? 
SQLITE_READONLY : SQLITE_ERROR; + }else{ + assert( pBt->pPage1!=0 ); + pP1 = pBt->pPage1->aData; + rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); + if( rc==SQLITE_OK ){ + put4byte(&pP1[36 + idx*4], iMeta); +#ifndef SQLITE_OMIT_AUTOVACUUM + if( idx==7 ){ + assert( pBt->autoVacuum || iMeta==0 ); + assert( iMeta==0 || iMeta==1 ); + pBt->incrVacuum = iMeta; + } +#endif + } + } + sqlite3BtreeLeave(p); + return rc; +} + +/* +** Return the flag byte at the beginning of the page that the cursor +** is currently pointing to. +*/ +int sqlite3BtreeFlags(BtCursor *pCur){ + /* TODO: What about CURSOR_REQUIRESEEK state? Probably need to call + ** restoreOrClearCursorPosition() here. + */ + MemPage *pPage = pCur->pPage; + assert( cursorHoldsMutex(pCur) ); + assert( pPage->pBt==pCur->pBt ); + return pPage ? pPage->aData[pPage->hdrOffset] : 0; +} + + +/* +** Return the pager associated with a BTree. This routine is used for +** testing and debugging only. +*/ +Pager *sqlite3BtreePager(Btree *p){ + return p->pBt->pPager; +} + +#ifndef SQLITE_OMIT_INTEGRITY_CHECK +/* +** Append a message to the error message string. +*/ +static void checkAppendMsg( + IntegrityCk *pCheck, + char *zMsg1, + const char *zFormat, + ... +){ + va_list ap; + char *zMsg2; + if( !pCheck->mxErr ) return; + pCheck->mxErr--; + pCheck->nErr++; + va_start(ap, zFormat); + zMsg2 = sqlite3VMPrintf(0, zFormat, ap); + va_end(ap); + if( zMsg1==0 ) zMsg1 = ""; + if( pCheck->zErrMsg ){ + char *zOld = pCheck->zErrMsg; + pCheck->zErrMsg = 0; + sqlite3SetString(&pCheck->zErrMsg, zOld, "\n", zMsg1, zMsg2, (char*)0); + sqlite3_free(zOld); + }else{ + sqlite3SetString(&pCheck->zErrMsg, zMsg1, zMsg2, (char*)0); + } + sqlite3_free(zMsg2); +} +#endif /* SQLITE_OMIT_INTEGRITY_CHECK */ + +#ifndef SQLITE_OMIT_INTEGRITY_CHECK +/* +** Add 1 to the reference count for page iPage. If this is the second +** reference to the page, add an error message to pCheck->zErrMsg. +** Return 1 if there are 2 ore more references to the page and 0 if +** if this is the first reference to the page. +** +** Also check that the page number is in bounds. +*/ +static int checkRef(IntegrityCk *pCheck, int iPage, char *zContext){ + if( iPage==0 ) return 1; + if( iPage>pCheck->nPage || iPage<0 ){ + checkAppendMsg(pCheck, zContext, "invalid page number %d", iPage); + return 1; + } + if( pCheck->anRef[iPage]==1 ){ + checkAppendMsg(pCheck, zContext, "2nd reference to page %d", iPage); + return 1; + } + return (pCheck->anRef[iPage]++)>1; +} + +#ifndef SQLITE_OMIT_AUTOVACUUM +/* +** Check that the entry in the pointer-map for page iChild maps to +** page iParent, pointer type ptrType. If not, append an error message +** to pCheck. +*/ +static void checkPtrmap( + IntegrityCk *pCheck, /* Integrity check context */ + Pgno iChild, /* Child page number */ + u8 eType, /* Expected pointer map type */ + Pgno iParent, /* Expected pointer map parent page number */ + char *zContext /* Context description (used for error msg) */ +){ + int rc; + u8 ePtrmapType; + Pgno iPtrmapParent; + + rc = ptrmapGet(pCheck->pBt, iChild, &ePtrmapType, &iPtrmapParent); + if( rc!=SQLITE_OK ){ + checkAppendMsg(pCheck, zContext, "Failed to read ptrmap key=%d", iChild); + return; + } + + if( ePtrmapType!=eType || iPtrmapParent!=iParent ){ + checkAppendMsg(pCheck, zContext, + "Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)", + iChild, eType, iParent, ePtrmapType, iPtrmapParent); + } +} +#endif + +/* +** Check the integrity of the freelist or of an overflow page list. 
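+**
+** For the freelist case each trunk page is laid out as: bytes 0-3 hold
+** the page number of the next trunk page, bytes 4-7 hold the number of
+** leaf page numbers stored on this trunk, and the leaf page numbers
+** follow as 4-byte values.  The code below flags a trunk whose leaf count
+** exceeds usableSize/4 - 8 (248 for a 1024-byte usable size) as corrupt.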
+** Verify that the number of pages on the list is N. +*/ +static void checkList( + IntegrityCk *pCheck, /* Integrity checking context */ + int isFreeList, /* True for a freelist. False for overflow page list */ + int iPage, /* Page number for first page in the list */ + int N, /* Expected number of pages in the list */ + char *zContext /* Context for error messages */ +){ + int i; + int expected = N; + int iFirst = iPage; + while( N-- > 0 && pCheck->mxErr ){ + DbPage *pOvflPage; + unsigned char *pOvflData; + if( iPage<1 ){ + checkAppendMsg(pCheck, zContext, + "%d of %d pages missing from overflow list starting at %d", + N+1, expected, iFirst); + break; + } + if( checkRef(pCheck, iPage, zContext) ) break; + if( sqlite3PagerGet(pCheck->pPager, (Pgno)iPage, &pOvflPage) ){ + checkAppendMsg(pCheck, zContext, "failed to get page %d", iPage); + break; + } + pOvflData = (unsigned char *)sqlite3PagerGetData(pOvflPage); + if( isFreeList ){ + int n = get4byte(&pOvflData[4]); +#ifndef SQLITE_OMIT_AUTOVACUUM + if( pCheck->pBt->autoVacuum ){ + checkPtrmap(pCheck, iPage, PTRMAP_FREEPAGE, 0, zContext); + } +#endif + if( n>pCheck->pBt->usableSize/4-8 ){ + checkAppendMsg(pCheck, zContext, + "freelist leaf count too big on page %d", iPage); + N--; + }else{ + for(i=0; ipBt->autoVacuum ){ + checkPtrmap(pCheck, iFreePage, PTRMAP_FREEPAGE, 0, zContext); + } +#endif + checkRef(pCheck, iFreePage, zContext); + } + N -= n; + } + } +#ifndef SQLITE_OMIT_AUTOVACUUM + else{ + /* If this database supports auto-vacuum and iPage is not the last + ** page in this overflow list, check that the pointer-map entry for + ** the following page matches iPage. + */ + if( pCheck->pBt->autoVacuum && N>0 ){ + i = get4byte(pOvflData); + checkPtrmap(pCheck, i, PTRMAP_OVERFLOW2, iPage, zContext); + } + } +#endif + iPage = get4byte(pOvflData); + sqlite3PagerUnref(pOvflPage); + } +} +#endif /* SQLITE_OMIT_INTEGRITY_CHECK */ + +#ifndef SQLITE_OMIT_INTEGRITY_CHECK +/* +** Do various sanity checks on a single page of a tree. Return +** the tree depth. Root pages return 0. Parents of root pages +** return 1, and so forth. +** +** These checks are done: +** +** 1. Make sure that cells and freeblocks do not overlap +** but combine to completely cover the page. +** NO 2. Make sure cell keys are in order. +** NO 3. Make sure no key is less than or equal to zLowerBound. +** NO 4. Make sure no key is greater than or equal to zUpperBound. +** 5. Check the integrity of overflow pages. +** 6. Recursively call checkTreePage on all children. +** 7. Verify that the depth of all children is the same. +** 8. Make sure this page is at least 33% full or else it is +** the root of the tree. +*/ +static int checkTreePage( + IntegrityCk *pCheck, /* Context for the sanity check */ + int iPage, /* Page number of the page to check */ + MemPage *pParent, /* Parent page */ + char *zParentContext /* Parent context */ +){ + MemPage *pPage; + int i, rc, depth, d2, pgno, cnt; + int hdr, cellStart; + int nCell; + u8 *data; + BtShared *pBt; + int usableSize; + char zContext[100]; + char *hit; + + sqlite3_snprintf(sizeof(zContext), zContext, "Page %d: ", iPage); + + /* Check that the page exists + */ + pBt = pCheck->pBt; + usableSize = pBt->usableSize; + if( iPage==0 ) return 0; + if( checkRef(pCheck, iPage, zParentContext) ) return 0; + if( (rc = sqlite3BtreeGetPage(pBt, (Pgno)iPage, &pPage, 0))!=0 ){ + checkAppendMsg(pCheck, zContext, + "unable to get the page. 
error code=%d", rc); + return 0; + } + if( (rc = sqlite3BtreeInitPage(pPage, pParent))!=0 ){ + checkAppendMsg(pCheck, zContext, + "sqlite3BtreeInitPage() returns error code %d", rc); + releasePage(pPage); + return 0; + } + + /* Check out all the cells. + */ + depth = 0; + for(i=0; inCell && pCheck->mxErr; i++){ + u8 *pCell; + int sz; + CellInfo info; + + /* Check payload overflow pages + */ + sqlite3_snprintf(sizeof(zContext), zContext, + "On tree page %d cell %d: ", iPage, i); + pCell = findCell(pPage,i); + sqlite3BtreeParseCellPtr(pPage, pCell, &info); + sz = info.nData; + if( !pPage->intKey ) sz += info.nKey; + assert( sz==info.nPayload ); + if( sz>info.nLocal ){ + int nPage = (sz - info.nLocal + usableSize - 5)/(usableSize - 4); + Pgno pgnoOvfl = get4byte(&pCell[info.iOverflow]); +#ifndef SQLITE_OMIT_AUTOVACUUM + if( pBt->autoVacuum ){ + checkPtrmap(pCheck, pgnoOvfl, PTRMAP_OVERFLOW1, iPage, zContext); + } +#endif + checkList(pCheck, 0, pgnoOvfl, nPage, zContext); + } + + /* Check sanity of left child page. + */ + if( !pPage->leaf ){ + pgno = get4byte(pCell); +#ifndef SQLITE_OMIT_AUTOVACUUM + if( pBt->autoVacuum ){ + checkPtrmap(pCheck, pgno, PTRMAP_BTREE, iPage, zContext); + } +#endif + d2 = checkTreePage(pCheck,pgno,pPage,zContext); + if( i>0 && d2!=depth ){ + checkAppendMsg(pCheck, zContext, "Child page depth differs"); + } + depth = d2; + } + } + if( !pPage->leaf ){ + pgno = get4byte(&pPage->aData[pPage->hdrOffset+8]); + sqlite3_snprintf(sizeof(zContext), zContext, + "On page %d at right child: ", iPage); +#ifndef SQLITE_OMIT_AUTOVACUUM + if( pBt->autoVacuum ){ + checkPtrmap(pCheck, pgno, PTRMAP_BTREE, iPage, 0); + } +#endif + checkTreePage(pCheck, pgno, pPage, zContext); + } + + /* Check for complete coverage of the page + */ + data = pPage->aData; + hdr = pPage->hdrOffset; + hit = sqlite3MallocZero( usableSize ); + if( hit ){ + memset(hit, 1, get2byte(&data[hdr+5])); + nCell = get2byte(&data[hdr+3]); + cellStart = hdr + 12 - 4*pPage->leaf; + for(i=0; i=usableSize || pc<0 ){ + checkAppendMsg(pCheck, 0, + "Corruption detected in cell %d on page %d",i,iPage,0); + }else{ + for(j=pc+size-1; j>=pc; j--) hit[j]++; + } + } + for(cnt=0, i=get2byte(&data[hdr+1]); i>0 && i=usableSize || i<0 ){ + checkAppendMsg(pCheck, 0, + "Corruption detected in cell %d on page %d",i,iPage,0); + }else{ + for(j=i+size-1; j>=i; j--) hit[j]++; + } + i = get2byte(&data[i]); + } + for(i=cnt=0; i1 ){ + checkAppendMsg(pCheck, 0, + "Multiple uses for byte %d of page %d", i, iPage); + break; + } + } + if( cnt!=data[hdr+7] ){ + checkAppendMsg(pCheck, 0, + "Fragmented space is %d byte reported as %d on page %d", + cnt, data[hdr+7], iPage); + } + } + sqlite3_free(hit); + + releasePage(pPage); + return depth+1; +} +#endif /* SQLITE_OMIT_INTEGRITY_CHECK */ + +#ifndef SQLITE_OMIT_INTEGRITY_CHECK +/* +** This routine does a complete check of the given BTree file. aRoot[] is +** an array of pages numbers were each page number is the root page of +** a table. nRoot is the number of entries in aRoot. +** +** If everything checks out, this routine returns NULL. If something is +** amiss, an error message is written into memory obtained from malloc() +** and a pointer to that error message is returned. The calling function +** is responsible for freeing the error message when it is done. 
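As a hedged usage sketch of the contract documented above: the wrapper name demoIntegrityCheck(), the error limit of 100, and the diagnostic print (assuming <stdio.h>) are illustrative; the Btree handle and root-page array are assumed to come from the schema layer.

static void demoIntegrityCheck(Btree *p, int *aRoot, int nRoot){
  int nErr = 0;
  char *zMsg = sqlite3BtreeIntegrityCheck(p, aRoot, nRoot, 100, &nErr);
  if( zMsg==0 ){
    /* No message means every check passed. */
  }else{
    /* nErr problems were recorded; the caller owns the returned string. */
    fprintf(stderr, "%d problem(s) found:\n%s\n", nErr, zMsg);
    sqlite3_free(zMsg);
  }
}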
+*/ +char *sqlite3BtreeIntegrityCheck( + Btree *p, /* The btree to be checked */ + int *aRoot, /* An array of root pages numbers for individual trees */ + int nRoot, /* Number of entries in aRoot[] */ + int mxErr, /* Stop reporting errors after this many */ + int *pnErr /* Write number of errors seen to this variable */ +){ + int i; + int nRef; + IntegrityCk sCheck; + BtShared *pBt = p->pBt; + + sqlite3BtreeEnter(p); + nRef = sqlite3PagerRefcount(pBt->pPager); + if( lockBtreeWithRetry(p)!=SQLITE_OK ){ + sqlite3BtreeLeave(p); + return sqlite3StrDup("Unable to acquire a read lock on the database"); + } + sCheck.pBt = pBt; + sCheck.pPager = pBt->pPager; + sCheck.nPage = sqlite3PagerPagecount(sCheck.pPager); + sCheck.mxErr = mxErr; + sCheck.nErr = 0; + *pnErr = 0; +#ifndef SQLITE_OMIT_AUTOVACUUM + if( pBt->nTrunc!=0 ){ + sCheck.nPage = pBt->nTrunc; + } +#endif + if( sCheck.nPage==0 ){ + unlockBtreeIfUnused(pBt); + sqlite3BtreeLeave(p); + return 0; + } + sCheck.anRef = sqlite3_malloc( (sCheck.nPage+1)*sizeof(sCheck.anRef[0]) ); + if( !sCheck.anRef ){ + unlockBtreeIfUnused(pBt); + *pnErr = 1; + sqlite3BtreeLeave(p); + return sqlite3MPrintf(p->pSqlite, "Unable to malloc %d bytes", + (sCheck.nPage+1)*sizeof(sCheck.anRef[0])); + } + for(i=0; i<=sCheck.nPage; i++){ sCheck.anRef[i] = 0; } + i = PENDING_BYTE_PAGE(pBt); + if( i<=sCheck.nPage ){ + sCheck.anRef[i] = 1; + } + sCheck.zErrMsg = 0; + + /* Check the integrity of the freelist + */ + checkList(&sCheck, 1, get4byte(&pBt->pPage1->aData[32]), + get4byte(&pBt->pPage1->aData[36]), "Main freelist: "); + + /* Check all the tables. + */ + for(i=0; iautoVacuum && aRoot[i]>1 ){ + checkPtrmap(&sCheck, aRoot[i], PTRMAP_ROOTPAGE, 0, 0); + } +#endif + checkTreePage(&sCheck, aRoot[i], 0, "List of tree roots: "); + } + + /* Make sure every page in the file is referenced + */ + for(i=1; i<=sCheck.nPage && sCheck.mxErr; i++){ +#ifdef SQLITE_OMIT_AUTOVACUUM + if( sCheck.anRef[i]==0 ){ + checkAppendMsg(&sCheck, 0, "Page %d is never used", i); + } +#else + /* If the database supports auto-vacuum, make sure no tables contain + ** references to pointer-map pages. + */ + if( sCheck.anRef[i]==0 && + (PTRMAP_PAGENO(pBt, i)!=i || !pBt->autoVacuum) ){ + checkAppendMsg(&sCheck, 0, "Page %d is never used", i); + } + if( sCheck.anRef[i]!=0 && + (PTRMAP_PAGENO(pBt, i)==i && pBt->autoVacuum) ){ + checkAppendMsg(&sCheck, 0, "Pointer map page %d is referenced", i); + } +#endif + } + + /* Make sure this analysis did not leave any unref() pages + */ + unlockBtreeIfUnused(pBt); + if( nRef != sqlite3PagerRefcount(pBt->pPager) ){ + checkAppendMsg(&sCheck, 0, + "Outstanding page count goes from %d to %d during this analysis", + nRef, sqlite3PagerRefcount(pBt->pPager) + ); + } + + /* Clean up and report errors. + */ + sqlite3BtreeLeave(p); + sqlite3_free(sCheck.anRef); + *pnErr = sCheck.nErr; + return sCheck.zErrMsg; +} +#endif /* SQLITE_OMIT_INTEGRITY_CHECK */ + +/* +** Return the full pathname of the underlying database file. +** +** The pager filename is invariant as long as the pager is +** open so it is safe to access without the BtShared mutex. +*/ +const char *sqlite3BtreeGetFilename(Btree *p){ + assert( p->pBt->pPager!=0 ); + return sqlite3PagerFilename(p->pBt->pPager); +} + +/* +** Return the pathname of the directory that contains the database file. +** +** The pager directory name is invariant as long as the pager is +** open so it is safe to access without the BtShared mutex. 
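The offsets 32 and 36 used in the "Main freelist" call above come from the database file header on page 1. A small sketch of decoding those two fields from a raw page-1 image; the buffer and function names are illustrative.

static void demoReadFreelistHeader(const unsigned char *aPage1,
                                   unsigned int *pFirstTrunk,    /* offset 32 */
                                   unsigned int *pnFreePages){   /* offset 36 */
  *pFirstTrunk = ((unsigned)aPage1[32]<<24) | (aPage1[33]<<16)
               | (aPage1[34]<<8) | aPage1[35];
  *pnFreePages = ((unsigned)aPage1[36]<<24) | (aPage1[37]<<16)
               | (aPage1[38]<<8) | aPage1[39];
}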
+*/ +const char *sqlite3BtreeGetDirname(Btree *p){ + assert( p->pBt->pPager!=0 ); + return sqlite3PagerDirname(p->pBt->pPager); +} + +/* +** Return the pathname of the journal file for this database. The return +** value of this routine is the same regardless of whether the journal file +** has been created or not. +** +** The pager journal filename is invariant as long as the pager is +** open so it is safe to access without the BtShared mutex. +*/ +const char *sqlite3BtreeGetJournalname(Btree *p){ + assert( p->pBt->pPager!=0 ); + return sqlite3PagerJournalname(p->pBt->pPager); +} + +#ifndef SQLITE_OMIT_VACUUM +/* +** Copy the complete content of pBtFrom into pBtTo. A transaction +** must be active for both files. +** +** The size of file pBtFrom may be reduced by this operation. +** If anything goes wrong, the transaction on pBtFrom is rolled back. +*/ +static int btreeCopyFile(Btree *pTo, Btree *pFrom){ + int rc = SQLITE_OK; + Pgno i, nPage, nToPage, iSkip; + + BtShared *pBtTo = pTo->pBt; + BtShared *pBtFrom = pFrom->pBt; + + if( pTo->inTrans!=TRANS_WRITE || pFrom->inTrans!=TRANS_WRITE ){ + return SQLITE_ERROR; + } + if( pBtTo->pCursor ) return SQLITE_BUSY; + nToPage = sqlite3PagerPagecount(pBtTo->pPager); + nPage = sqlite3PagerPagecount(pBtFrom->pPager); + iSkip = PENDING_BYTE_PAGE(pBtTo); + for(i=1; rc==SQLITE_OK && i<=nPage; i++){ + DbPage *pDbPage; + if( i==iSkip ) continue; + rc = sqlite3PagerGet(pBtFrom->pPager, i, &pDbPage); + if( rc ) break; + rc = sqlite3PagerOverwrite(pBtTo->pPager, i, sqlite3PagerGetData(pDbPage)); + sqlite3PagerUnref(pDbPage); + } + + /* If the file is shrinking, journal the pages that are being truncated + ** so that they can be rolled back if the commit fails. + */ + for(i=nPage+1; rc==SQLITE_OK && i<=nToPage; i++){ + DbPage *pDbPage; + if( i==iSkip ) continue; + rc = sqlite3PagerGet(pBtTo->pPager, i, &pDbPage); + if( rc ) break; + rc = sqlite3PagerWrite(pDbPage); + sqlite3PagerDontWrite(pDbPage); + /* Yeah. It seems wierd to call DontWrite() right after Write(). But + ** that is because the names of those procedures do not exactly + ** represent what they do. Write() really means "put this page in the + ** rollback journal and mark it as dirty so that it will be written + ** to the database file later." DontWrite() undoes the second part of + ** that and prevents the page from being written to the database. The + ** page is still on the rollback journal, though. And that is the whole + ** point of this loop: to put pages on the rollback journal. */ + sqlite3PagerUnref(pDbPage); + } + if( !rc && nPagepPager, nPage); + } + + if( rc ){ + sqlite3BtreeRollback(pTo); + } + return rc; +} +int sqlite3BtreeCopyFile(Btree *pTo, Btree *pFrom){ + int rc; + sqlite3BtreeEnter(pTo); + sqlite3BtreeEnter(pFrom); + rc = btreeCopyFile(pTo, pFrom); + sqlite3BtreeLeave(pFrom); + sqlite3BtreeLeave(pTo); + return rc; +} + +#endif /* SQLITE_OMIT_VACUUM */ + +/* +** Return non-zero if a transaction is active. +*/ +int sqlite3BtreeIsInTrans(Btree *p){ + assert( p==0 || sqlite3_mutex_held(p->pSqlite->mutex) ); + return (p && (p->inTrans==TRANS_WRITE)); +} + +/* +** Return non-zero if a statement transaction is active. +*/ +int sqlite3BtreeIsInStmt(Btree *p){ + assert( sqlite3BtreeHoldsMutex(p) ); + return (p->pBt && p->pBt->inStmt); +} + +/* +** Return non-zero if a read (or write) transaction is active. 
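A hedged illustration of the transaction requirement stated above (both handles must be inside write transactions before the copy): the wrapper below omits rollback handling and its name is illustrative.

static int demoCopyDatabase(Btree *pTo, Btree *pFrom){
  int rc;
  rc = sqlite3BtreeBeginTrans(pFrom, 1);                    /* write transaction on the source */
  if( rc==SQLITE_OK ) rc = sqlite3BtreeBeginTrans(pTo, 1);  /* and on the destination */
  if( rc==SQLITE_OK ) rc = sqlite3BtreeCopyFile(pTo, pFrom);
  if( rc==SQLITE_OK ) rc = sqlite3BtreeCommit(pTo);
  if( rc==SQLITE_OK ) rc = sqlite3BtreeCommit(pFrom);
  return rc;
}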
+*/ +int sqlite3BtreeIsInReadTrans(Btree *p){ + assert( sqlite3_mutex_held(p->pSqlite->mutex) ); + return (p && (p->inTrans!=TRANS_NONE)); +} + +/* +** This function returns a pointer to a blob of memory associated with +** a single shared-btree. The memory is used by client code for it's own +** purposes (for example, to store a high-level schema associated with +** the shared-btree). The btree layer manages reference counting issues. +** +** The first time this is called on a shared-btree, nBytes bytes of memory +** are allocated, zeroed, and returned to the caller. For each subsequent +** call the nBytes parameter is ignored and a pointer to the same blob +** of memory returned. +** +** Just before the shared-btree is closed, the function passed as the +** xFree argument when the memory allocation was made is invoked on the +** blob of allocated memory. This function should not call sqlite3_free() +** on the memory, the btree layer does that. +*/ +void *sqlite3BtreeSchema(Btree *p, int nBytes, void(*xFree)(void *)){ + BtShared *pBt = p->pBt; + sqlite3BtreeEnter(p); + if( !pBt->pSchema ){ + pBt->pSchema = sqlite3MallocZero(nBytes); + pBt->xFreeSchema = xFree; + } + sqlite3BtreeLeave(p); + return pBt->pSchema; +} + +/* +** Return true if another user of the same shared btree as the argument +** handle holds an exclusive lock on the sqlite_master table. +*/ +int sqlite3BtreeSchemaLocked(Btree *p){ + int rc; + assert( sqlite3_mutex_held(p->pSqlite->mutex) ); + sqlite3BtreeEnter(p); + rc = (queryTableLock(p, MASTER_ROOT, READ_LOCK)!=SQLITE_OK); + sqlite3BtreeLeave(p); + return rc; +} + + +#ifndef SQLITE_OMIT_SHARED_CACHE +/* +** Obtain a lock on the table whose root page is iTab. The +** lock is a write lock if isWritelock is true or a read lock +** if it is false. +*/ +int sqlite3BtreeLockTable(Btree *p, int iTab, u8 isWriteLock){ + int rc = SQLITE_OK; + u8 lockType = (isWriteLock?WRITE_LOCK:READ_LOCK); + sqlite3BtreeEnter(p); + rc = queryTableLock(p, iTab, lockType); + if( rc==SQLITE_OK ){ + rc = lockTable(p, iTab, lockType); + } + sqlite3BtreeLeave(p); + return rc; +} +#endif + +#ifndef SQLITE_OMIT_INCRBLOB +/* +** Argument pCsr must be a cursor opened for writing on an +** INTKEY table currently pointing at a valid table entry. +** This function modifies the data stored as part of that entry. +** Only the data content may only be modified, it is not possible +** to change the length of the data stored. +*/ +int sqlite3BtreePutData(BtCursor *pCsr, u32 offset, u32 amt, void *z){ + assert( cursorHoldsMutex(pCsr) ); + assert( sqlite3_mutex_held(pCsr->pBtree->pSqlite->mutex) ); + assert(pCsr->isIncrblobHandle); + if( pCsr->eState>=CURSOR_REQUIRESEEK ){ + if( pCsr->eState==CURSOR_FAULT ){ + return pCsr->skip; + }else{ + return SQLITE_ABORT; + } + } + + /* Check some preconditions: + ** (a) the cursor is open for writing, + ** (b) there is no read-lock on the table being modified and + ** (c) the cursor points at a valid row of an intKey table. + */ + if( !pCsr->wrFlag ){ + return SQLITE_READONLY; + } + assert( !pCsr->pBt->readOnly + && pCsr->pBt->inTransaction==TRANS_WRITE ); + if( checkReadLocks(pCsr->pBtree, pCsr->pgnoRoot, pCsr) ){ + return SQLITE_LOCKED; /* The table pCur points to has a read lock */ + } + if( pCsr->eState==CURSOR_INVALID || !pCsr->pPage->intKey ){ + return SQLITE_ERROR; + } + + return accessPayload(pCsr, offset, amt, (unsigned char *)z, 0, 1); +} + +/* +** Set a flag on this cursor to cache the locations of pages from the +** overflow list for the current row. 
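A brief usage sketch of the sqlite3BtreeSchema() contract described above: the first call allocates a zeroed blob of the requested size and registers the destructor, later calls hand back the same pointer. The DemoSchema type and function names are illustrative, not part of SQLite.

typedef struct DemoSchema DemoSchema;
struct DemoSchema {
  int schemaCookie;        /* example field kept in the shared schema blob */
};

static void demoFreeSchema(void *pSchema){
  /* Release anything hanging off the blob.  The blob itself is freed by
  ** the btree layer, so sqlite3_free() must not be called on it here. */
  (void)pSchema;
}

static DemoSchema *demoGetSchema(Btree *pBtree){
  return (DemoSchema*)sqlite3BtreeSchema(pBtree, sizeof(DemoSchema), demoFreeSchema);
}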
This is used by cursors opened +** for incremental blob IO only. +** +** This function sets a flag only. The actual page location cache +** (stored in BtCursor.aOverflow[]) is allocated and used by function +** accessPayload() (the worker function for sqlite3BtreeData() and +** sqlite3BtreePutData()). +*/ +void sqlite3BtreeCacheOverflow(BtCursor *pCur){ + assert( cursorHoldsMutex(pCur) ); + assert( sqlite3_mutex_held(pCur->pBtree->pSqlite->mutex) ); + assert(!pCur->isIncrblobHandle); + assert(!pCur->aOverflow); + pCur->isIncrblobHandle = 1; +} +#endif diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/btree.h b/libraries/sqlite/unix/sqlite-3.5.1/src/btree.h new file mode 100644 index 0000000000..f7bc8e12d9 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/btree.h @@ -0,0 +1,204 @@ +/* +** 2001 September 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This header file defines the interface that the sqlite B-Tree file +** subsystem. See comments in the source code for a detailed description +** of what each interface routine does. +** +** @(#) $Id: btree.h,v 1.93 2007/09/03 15:19:35 drh Exp $ +*/ +#ifndef _BTREE_H_ +#define _BTREE_H_ + +/* TODO: This definition is just included so other modules compile. It +** needs to be revisited. +*/ +#define SQLITE_N_BTREE_META 10 + +/* +** If defined as non-zero, auto-vacuum is enabled by default. Otherwise +** it must be turned on for each database using "PRAGMA auto_vacuum = 1". +*/ +#ifndef SQLITE_DEFAULT_AUTOVACUUM + #define SQLITE_DEFAULT_AUTOVACUUM 0 +#endif + +#define BTREE_AUTOVACUUM_NONE 0 /* Do not do auto-vacuum */ +#define BTREE_AUTOVACUUM_FULL 1 /* Do full auto-vacuum */ +#define BTREE_AUTOVACUUM_INCR 2 /* Incremental vacuum */ + +/* +** Forward declarations of structure +*/ +typedef struct Btree Btree; +typedef struct BtCursor BtCursor; +typedef struct BtShared BtShared; +typedef struct BtreeMutexArray BtreeMutexArray; + +/* +** This structure records all of the Btrees that need to hold +** a mutex before we enter sqlite3VdbeExec(). The Btrees are +** are placed in aBtree[] in order of aBtree[]->pBt. That way, +** we can always lock and unlock them all quickly. +*/ +struct BtreeMutexArray { + int nMutex; + Btree *aBtree[SQLITE_MAX_ATTACHED+1]; +}; + + +int sqlite3BtreeOpen( + const char *zFilename, /* Name of database file to open */ + sqlite3 *db, /* Associated database connection */ + Btree **, /* Return open Btree* here */ + int flags, /* Flags */ + int vfsFlags /* Flags passed through to VFS open */ +); + +/* The flags parameter to sqlite3BtreeOpen can be the bitwise or of the +** following values. +** +** NOTE: These values must match the corresponding PAGER_ values in +** pager.h. +*/ +#define BTREE_OMIT_JOURNAL 1 /* Do not use journal. No argument */ +#define BTREE_NO_READLOCK 2 /* Omit readlocks on readonly files */ +#define BTREE_MEMORY 4 /* In-memory DB. No argument */ +#define BTREE_READONLY 8 /* Open the database in read-only mode */ +#define BTREE_READWRITE 16 /* Open for both reading and writing */ +#define BTREE_CREATE 32 /* Create the database if it does not exist */ + +/* Additional values for the 4th argument of sqlite3BtreeOpen that +** are not associated with PAGER_ values. 
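A hedged sketch of combining the BTREE_* flag bits defined above in a call to sqlite3BtreeOpen(); the wrapper name is illustrative, and the 0 passed for vfsFlags is a placeholder, real callers forward the appropriate SQLITE_OPEN_* bits.

static int demoOpenBtree(sqlite3 *db, const char *zFilename, Btree **ppBt){
  int flags = BTREE_READWRITE | BTREE_CREATE;   /* open read/write, create if missing */
  return sqlite3BtreeOpen(zFilename, db, ppBt, flags, 0);
}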
+*/ +#define BTREE_PRIVATE 64 /* Never share with other connections */ + +int sqlite3BtreeClose(Btree*); +int sqlite3BtreeSetBusyHandler(Btree*,BusyHandler*); +int sqlite3BtreeSetCacheSize(Btree*,int); +int sqlite3BtreeSetSafetyLevel(Btree*,int,int); +int sqlite3BtreeSyncDisabled(Btree*); +int sqlite3BtreeSetPageSize(Btree*,int,int); +int sqlite3BtreeGetPageSize(Btree*); +int sqlite3BtreeMaxPageCount(Btree*,int); +int sqlite3BtreeGetReserve(Btree*); +int sqlite3BtreeSetAutoVacuum(Btree *, int); +int sqlite3BtreeGetAutoVacuum(Btree *); +int sqlite3BtreeBeginTrans(Btree*,int); +int sqlite3BtreeCommitPhaseOne(Btree*, const char *zMaster); +int sqlite3BtreeCommitPhaseTwo(Btree*); +int sqlite3BtreeCommit(Btree*); +int sqlite3BtreeRollback(Btree*); +int sqlite3BtreeBeginStmt(Btree*); +int sqlite3BtreeCommitStmt(Btree*); +int sqlite3BtreeRollbackStmt(Btree*); +int sqlite3BtreeCreateTable(Btree*, int*, int flags); +int sqlite3BtreeIsInTrans(Btree*); +int sqlite3BtreeIsInStmt(Btree*); +int sqlite3BtreeIsInReadTrans(Btree*); +void *sqlite3BtreeSchema(Btree *, int, void(*)(void *)); +int sqlite3BtreeSchemaLocked(Btree *); +int sqlite3BtreeLockTable(Btree *, int, u8); + +const char *sqlite3BtreeGetFilename(Btree *); +const char *sqlite3BtreeGetDirname(Btree *); +const char *sqlite3BtreeGetJournalname(Btree *); +int sqlite3BtreeCopyFile(Btree *, Btree *); + +int sqlite3BtreeIncrVacuum(Btree *); + +/* The flags parameter to sqlite3BtreeCreateTable can be the bitwise OR +** of the following flags: +*/ +#define BTREE_INTKEY 1 /* Table has only 64-bit signed integer keys */ +#define BTREE_ZERODATA 2 /* Table has keys only - no data */ +#define BTREE_LEAFDATA 4 /* Data stored in leaves only. Implies INTKEY */ + +int sqlite3BtreeDropTable(Btree*, int, int*); +int sqlite3BtreeClearTable(Btree*, int); +int sqlite3BtreeGetMeta(Btree*, int idx, u32 *pValue); +int sqlite3BtreeUpdateMeta(Btree*, int idx, u32 value); +void sqlite3BtreeTripAllCursors(Btree*, int); + +int sqlite3BtreeCursor( + Btree*, /* BTree containing table to open */ + int iTable, /* Index of root page */ + int wrFlag, /* 1 for writing. 
0 for read-only */ + int(*)(void*,int,const void*,int,const void*), /* Key comparison function */ + void*, /* First argument to compare function */ + BtCursor **ppCursor /* Returned cursor */ +); + +int sqlite3BtreeCloseCursor(BtCursor*); +int sqlite3BtreeMoveto(BtCursor*,const void *pKey,i64 nKey,int bias,int *pRes); +int sqlite3BtreeDelete(BtCursor*); +int sqlite3BtreeInsert(BtCursor*, const void *pKey, i64 nKey, + const void *pData, int nData, + int nZero, int bias); +int sqlite3BtreeFirst(BtCursor*, int *pRes); +int sqlite3BtreeLast(BtCursor*, int *pRes); +int sqlite3BtreeNext(BtCursor*, int *pRes); +int sqlite3BtreeEof(BtCursor*); +int sqlite3BtreeFlags(BtCursor*); +int sqlite3BtreePrevious(BtCursor*, int *pRes); +int sqlite3BtreeKeySize(BtCursor*, i64 *pSize); +int sqlite3BtreeKey(BtCursor*, u32 offset, u32 amt, void*); +sqlite3 *sqlite3BtreeCursorDb(const BtCursor*); +const void *sqlite3BtreeKeyFetch(BtCursor*, int *pAmt); +const void *sqlite3BtreeDataFetch(BtCursor*, int *pAmt); +int sqlite3BtreeDataSize(BtCursor*, u32 *pSize); +int sqlite3BtreeData(BtCursor*, u32 offset, u32 amt, void*); + +char *sqlite3BtreeIntegrityCheck(Btree*, int *aRoot, int nRoot, int, int*); +struct Pager *sqlite3BtreePager(Btree*); + +int sqlite3BtreePutData(BtCursor*, u32 offset, u32 amt, void*); +void sqlite3BtreeCacheOverflow(BtCursor *); + +#ifdef SQLITE_TEST +int sqlite3BtreeCursorInfo(BtCursor*, int*, int); +void sqlite3BtreeCursorList(Btree*); +int sqlite3BtreePageDump(Btree*, int, int recursive); +#endif + +/* +** If we are not using shared cache, then there is no need to +** use mutexes to access the BtShared structures. So make the +** Enter and Leave procedures no-ops. +*/ +#if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE + void sqlite3BtreeEnter(Btree*); + void sqlite3BtreeLeave(Btree*); + int sqlite3BtreeHoldsMutex(Btree*); + void sqlite3BtreeEnterCursor(BtCursor*); + void sqlite3BtreeLeaveCursor(BtCursor*); + void sqlite3BtreeEnterAll(sqlite3*); + void sqlite3BtreeLeaveAll(sqlite3*); + int sqlite3BtreeHoldsAllMutexes(sqlite3*); + void sqlite3BtreeMutexArrayEnter(BtreeMutexArray*); + void sqlite3BtreeMutexArrayLeave(BtreeMutexArray*); + void sqlite3BtreeMutexArrayInsert(BtreeMutexArray*, Btree*); +#else +# define sqlite3BtreeEnter(X) +# define sqlite3BtreeLeave(X) +# define sqlite3BtreeHoldsMutex(X) 1 +# define sqlite3BtreeEnterCursor(X) +# define sqlite3BtreeLeaveCursor(X) +# define sqlite3BtreeEnterAll(X) +# define sqlite3BtreeLeaveAll(X) +# define sqlite3BtreeHoldsAllMutexes(X) 1 +# define sqlite3BtreeMutexArrayEnter(X) +# define sqlite3BtreeMutexArrayLeave(X) +# define sqlite3BtreeMutexArrayInsert(X,Y) +#endif + + +#endif /* _BTREE_H_ */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/btreeInt.h b/libraries/sqlite/unix/sqlite-3.5.1/src/btreeInt.h new file mode 100644 index 0000000000..09f1474239 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/btreeInt.h @@ -0,0 +1,648 @@ +/* +** 2004 April 6 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** $Id: btreeInt.h,v 1.13 2007/08/30 01:19:59 drh Exp $ +** +** This file implements a external (disk-based) database using BTrees. +** For a detailed discussion of BTrees, refer to +** +** Donald E. 
Knuth, THE ART OF COMPUTER PROGRAMMING, Volume 3: +** "Sorting And Searching", pages 473-480. Addison-Wesley +** Publishing Company, Reading, Massachusetts. +** +** The basic idea is that each page of the file contains N database +** entries and N+1 pointers to subpages. +** +** ---------------------------------------------------------------- +** | Ptr(0) | Key(0) | Ptr(1) | Key(1) | ... | Key(N-1) | Ptr(N) | +** ---------------------------------------------------------------- +** +** All of the keys on the page that Ptr(0) points to have values less +** than Key(0). All of the keys on page Ptr(1) and its subpages have +** values greater than Key(0) and less than Key(1). All of the keys +** on Ptr(N) and its subpages have values greater than Key(N-1). And +** so forth. +** +** Finding a particular key requires reading O(log(M)) pages from the +** disk where M is the number of entries in the tree. +** +** In this implementation, a single file can hold one or more separate +** BTrees. Each BTree is identified by the index of its root page. The +** key and data for any entry are combined to form the "payload". A +** fixed amount of payload can be carried directly on the database +** page. If the payload is larger than the preset amount then surplus +** bytes are stored on overflow pages. The payload for an entry +** and the preceding pointer are combined to form a "Cell". Each +** page has a small header which contains the Ptr(N) pointer and other +** information such as the size of key and data. +** +** FORMAT DETAILS +** +** The file is divided into pages. The first page is called page 1, +** the second is page 2, and so forth. A page number of zero indicates +** "no such page". The page size can be anything between 512 and 65536. +** Each page can be either a btree page, a freelist page or an overflow +** page. +** +** The first page is always a btree page. The first 100 bytes of the first +** page contain a special header (the "file header") that describes the file. +** The format of the file header is as follows: +** +** OFFSET SIZE DESCRIPTION +** 0 16 Header string: "SQLite format 3\000" +** 16 2 Page size in bytes. +** 18 1 File format write version +** 19 1 File format read version +** 20 1 Bytes of unused space at the end of each page +** 21 1 Max embedded payload fraction +** 22 1 Min embedded payload fraction +** 23 1 Min leaf payload fraction +** 24 4 File change counter +** 28 4 Reserved for future use +** 32 4 First freelist page +** 36 4 Number of freelist pages in the file +** 40 60 15 4-byte meta values passed to higher layers +** +** All of the integer values are big-endian (most significant byte first). +** +** The file change counter is incremented when the database is changed +** This counter allows other processes to know when the file has changed +** and thus when they need to flush their cache. +** +** The max embedded payload fraction is the amount of the total usable +** space in a page that can be consumed by a single cell for standard +** B-tree (non-LEAFDATA) tables. A value of 255 means 100%. The default +** is to limit the maximum cell size so that at least 4 cells will fit +** on one page. Thus the default max embedded payload fraction is 64. +** +** If the payload for a cell is larger than the max payload, then extra +** payload is spilled to overflow pages. Once an overflow page is allocated, +** as many bytes as possible are moved into the overflow pages without letting +** the cell size drop below the min embedded payload fraction. 
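To make the file-header table above concrete, here is a minimal sketch that decodes a few of the listed fields from the first 100 bytes of a database file; the function and parameter names are illustrative.

#include <string.h>

static int demoParseFileHeader(const unsigned char *aHdr,   /* first 100 bytes of the file */
                               unsigned int *pPageSize,
                               unsigned int *pChangeCounter){
  if( memcmp(aHdr, "SQLite format 3", 16)!=0 ) return 1;    /* offset 0: 16-byte header string */
  *pPageSize = (aHdr[16]<<8) | aHdr[17];                    /* offset 16: page size, big-endian */
  *pChangeCounter = ((unsigned)aHdr[24]<<24) | (aHdr[25]<<16)
                  | (aHdr[26]<<8) | aHdr[27];               /* offset 24: file change counter */
  return 0;
}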
+** +** The min leaf payload fraction is like the min embedded payload fraction +** except that it applies to leaf nodes in a LEAFDATA tree. The maximum +** payload fraction for a LEAFDATA tree is always 100% (or 255) and it +** not specified in the header. +** +** Each btree pages is divided into three sections: The header, the +** cell pointer array, and the cell content area. Page 1 also has a 100-byte +** file header that occurs before the page header. +** +** |----------------| +** | file header | 100 bytes. Page 1 only. +** |----------------| +** | page header | 8 bytes for leaves. 12 bytes for interior nodes +** |----------------| +** | cell pointer | | 2 bytes per cell. Sorted order. +** | array | | Grows downward +** | | v +** |----------------| +** | unallocated | +** | space | +** |----------------| ^ Grows upwards +** | cell content | | Arbitrary order interspersed with freeblocks. +** | area | | and free space fragments. +** |----------------| +** +** The page headers looks like this: +** +** OFFSET SIZE DESCRIPTION +** 0 1 Flags. 1: intkey, 2: zerodata, 4: leafdata, 8: leaf +** 1 2 byte offset to the first freeblock +** 3 2 number of cells on this page +** 5 2 first byte of the cell content area +** 7 1 number of fragmented free bytes +** 8 4 Right child (the Ptr(N) value). Omitted on leaves. +** +** The flags define the format of this btree page. The leaf flag means that +** this page has no children. The zerodata flag means that this page carries +** only keys and no data. The intkey flag means that the key is a integer +** which is stored in the key size entry of the cell header rather than in +** the payload area. +** +** The cell pointer array begins on the first byte after the page header. +** The cell pointer array contains zero or more 2-byte numbers which are +** offsets from the beginning of the page to the cell content in the cell +** content area. The cell pointers occur in sorted order. The system strives +** to keep free space after the last cell pointer so that new cells can +** be easily added without having to defragment the page. +** +** Cell content is stored at the very end of the page and grows toward the +** beginning of the page. +** +** Unused space within the cell content area is collected into a linked list of +** freeblocks. Each freeblock is at least 4 bytes in size. The byte offset +** to the first freeblock is given in the header. Freeblocks occur in +** increasing order. Because a freeblock must be at least 4 bytes in size, +** any group of 3 or fewer unused bytes in the cell content area cannot +** exist on the freeblock chain. A group of 3 or fewer free bytes is called +** a fragment. The total number of bytes in all fragments is recorded. +** in the page header at offset 7. +** +** SIZE DESCRIPTION +** 2 Byte offset of the next freeblock +** 2 Bytes in this freeblock +** +** Cells are of variable length. Cells are stored in the cell content area at +** the end of the page. Pointers to the cells are in the cell pointer array +** that immediately follows the page header. Cells is not necessarily +** contiguous or in order, but cell pointers are contiguous and in order. +** +** Cell content makes use of variable length integers. A variable +** length integer is 1 to 9 bytes where the lower 7 bits of each +** byte are used. The integer consists of all bytes that have bit 8 set and +** the first byte with bit 8 clear. The most significant byte of the integer +** appears first. A variable-length integer may not be more than 9 bytes long. 
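The page-header table above maps directly onto a small decoder. A hedged sketch follows; the names are illustrative, and hdrOffset is 100 on page 1 and 0 on every other page.

typedef struct DemoPageHdr DemoPageHdr;
struct DemoPageHdr {
  unsigned char flags;           /* offset 0: PTF_* page type bits */
  unsigned int firstFreeblock;   /* offset 1: 0 means no freeblocks */
  unsigned int nCell;            /* offset 3: number of cells on the page */
  unsigned int cellContent;      /* offset 5: first byte of the cell content area */
  unsigned char nFrag;           /* offset 7: fragmented free bytes */
  unsigned int rightChild;       /* offset 8: Ptr(N); interior pages only */
};

static void demoReadPageHeader(const unsigned char *aData, int hdrOffset,
                               int isLeaf, DemoPageHdr *pOut){
  const unsigned char *h = &aData[hdrOffset];
  pOut->flags          = h[0];
  pOut->firstFreeblock = (h[1]<<8) | h[2];
  pOut->nCell          = (h[3]<<8) | h[4];
  pOut->cellContent    = (h[5]<<8) | h[6];
  pOut->nFrag          = h[7];
  pOut->rightChild     = isLeaf ? 0 :
      (((unsigned)h[8]<<24) | (h[9]<<16) | (h[10]<<8) | h[11]);
}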
+** As a special case, all 8 bytes of the 9th byte are used as data. This +** allows a 64-bit integer to be encoded in 9 bytes. +** +** 0x00 becomes 0x00000000 +** 0x7f becomes 0x0000007f +** 0x81 0x00 becomes 0x00000080 +** 0x82 0x00 becomes 0x00000100 +** 0x80 0x7f becomes 0x0000007f +** 0x8a 0x91 0xd1 0xac 0x78 becomes 0x12345678 +** 0x81 0x81 0x81 0x81 0x01 becomes 0x10204081 +** +** Variable length integers are used for rowids and to hold the number of +** bytes of key and data in a btree cell. +** +** The content of a cell looks like this: +** +** SIZE DESCRIPTION +** 4 Page number of the left child. Omitted if leaf flag is set. +** var Number of bytes of data. Omitted if the zerodata flag is set. +** var Number of bytes of key. Or the key itself if intkey flag is set. +** * Payload +** 4 First page of the overflow chain. Omitted if no overflow +** +** Overflow pages form a linked list. Each page except the last is completely +** filled with data (pagesize - 4 bytes). The last page can have as little +** as 1 byte of data. +** +** SIZE DESCRIPTION +** 4 Page number of next overflow page +** * Data +** +** Freelist pages come in two subtypes: trunk pages and leaf pages. The +** file header points to the first in a linked list of trunk page. Each trunk +** page points to multiple leaf pages. The content of a leaf page is +** unspecified. A trunk page looks like this: +** +** SIZE DESCRIPTION +** 4 Page number of next trunk page +** 4 Number of leaf pointers on this page +** * zero or more pages numbers of leaves +*/ +#include "sqliteInt.h" +#include "pager.h" +#include "btree.h" +#include "os.h" +#include + +/* Round up a number to the next larger multiple of 8. This is used +** to force 8-byte alignment on 64-bit architectures. +*/ +#define ROUND8(x) ((x+7)&~7) + + +/* The following value is the maximum cell size assuming a maximum page +** size give above. +*/ +#define MX_CELL_SIZE(pBt) (pBt->pageSize-8) + +/* The maximum number of cells on a single page of the database. This +** assumes a minimum cell size of 3 bytes. Such small cells will be +** exceedingly rare, but they are possible. +*/ +#define MX_CELL(pBt) ((pBt->pageSize-8)/3) + +/* Forward declarations */ +typedef struct MemPage MemPage; +typedef struct BtLock BtLock; + +/* +** This is a magic string that appears at the beginning of every +** SQLite database in order to identify the file as a real database. +** +** You can change this value at compile-time by specifying a +** -DSQLITE_FILE_HEADER="..." on the compiler command-line. The +** header must be exactly 16 bytes including the zero-terminator so +** the string itself should be 15 characters long. If you change +** the header, then your custom library will not be able to read +** databases generated by the standard tools and the standard tools +** will not be able to read databases created by your custom library. +*/ +#ifndef SQLITE_FILE_HEADER /* 123456789 123456 */ +# define SQLITE_FILE_HEADER "SQLite format 3" +#endif + +/* +** Page type flags. An ORed combination of these flags appear as the +** first byte of on-disk image of every BTree page. +*/ +#define PTF_INTKEY 0x01 +#define PTF_ZERODATA 0x02 +#define PTF_LEAFDATA 0x04 +#define PTF_LEAF 0x08 + +/* +** As each page of the file is loaded into memory, an instance of the following +** structure is appended and initialized to zero. This structure stores +** information about the page that is decoded from the raw file page. +** +** The pParent field points back to the parent page. 
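Returning to the variable-length integer format spelled out above (7 payload bits per byte, most significant group first, with the ninth byte contributing all 8 of its bits): it can be decoded with a short routine. This is an illustrative sketch, not the library's own sqlite3GetVarint().

#include <stdint.h>

static int demoGetVarint(const unsigned char *p, uint64_t *pResult){
  uint64_t v = 0;
  int i;
  for(i=0; i<8; i++){
    v = (v<<7) | (uint64_t)(p[i] & 0x7f);
    if( (p[i] & 0x80)==0 ){
      *pResult = v;
      return i+1;               /* 1- to 8-byte encodings */
    }
  }
  *pResult = (v<<8) | p[8];     /* 9-byte case: the last byte supplies its full 8 bits */
  return 9;
}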
This allows us to +** walk up the BTree from any leaf to the root. Care must be taken to +** unref() the parent page pointer when this page is no longer referenced. +** The pageDestructor() routine handles that chore. +** +** Access to all fields of this structure is controlled by the mutex +** stored in MemPage.pBt->mutex. +*/ +struct MemPage { + u8 isInit; /* True if previously initialized. MUST BE FIRST! */ + u8 idxShift; /* True if Cell indices have changed */ + u8 nOverflow; /* Number of overflow cell bodies in aCell[] */ + u8 intKey; /* True if intkey flag is set */ + u8 leaf; /* True if leaf flag is set */ + u8 zeroData; /* True if table stores keys only */ + u8 leafData; /* True if tables stores data on leaves only */ + u8 hasData; /* True if this page stores data */ + u8 hdrOffset; /* 100 for page 1. 0 otherwise */ + u8 childPtrSize; /* 0 if leaf==1. 4 if leaf==0 */ + u16 maxLocal; /* Copy of BtShared.maxLocal or BtShared.maxLeaf */ + u16 minLocal; /* Copy of BtShared.minLocal or BtShared.minLeaf */ + u16 cellOffset; /* Index in aData of first cell pointer */ + u16 idxParent; /* Index in parent of this node */ + u16 nFree; /* Number of free bytes on the page */ + u16 nCell; /* Number of cells on this page, local and ovfl */ + struct _OvflCell { /* Cells that will not fit on aData[] */ + u8 *pCell; /* Pointers to the body of the overflow cell */ + u16 idx; /* Insert this cell before idx-th non-overflow cell */ + } aOvfl[5]; + BtShared *pBt; /* Pointer to BtShared that this page is part of */ + u8 *aData; /* Pointer to disk image of the page data */ + DbPage *pDbPage; /* Pager page handle */ + Pgno pgno; /* Page number for this page */ + MemPage *pParent; /* The parent of this page. NULL for root */ +}; + +/* +** The in-memory image of a disk page has the auxiliary information appended +** to the end. EXTRA_SIZE is the number of bytes of space needed to hold +** that extra information. +*/ +#define EXTRA_SIZE sizeof(MemPage) + +/* A Btree handle +** +** A database connection contains a pointer to an instance of +** this object for every database file that it has open. This structure +** is opaque to the database connection. The database connection cannot +** see the internals of this structure and only deals with pointers to +** this structure. +** +** For some database files, the same underlying database cache might be +** shared between multiple connections. In that case, each contection +** has it own pointer to this object. But each instance of this object +** points to the same BtShared object. The database cache and the +** schema associated with the database file are all contained within +** the BtShared object. +** +** All fields in this structure are accessed under sqlite3.mutex. +** The pBt pointer itself may not be changed while there exists cursors +** in the referenced BtShared that point back to this Btree since those +** cursors have to do go through this Btree to find their BtShared and +** they often do so without holding sqlite3.mutex. 
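The MemPage booleans listed above (intKey, leaf, zeroData, leafData) are essentially a decoded copy of the PTF_* flag byte at the start of the page header. A simplified sketch of that decoding follows; the real page-initialization code applies some extra coupling between the bits, and the names here are illustrative.

static void demoDecodePageFlags(unsigned char flagByte,
                                unsigned char *pIntKey, unsigned char *pZeroData,
                                unsigned char *pLeafData, unsigned char *pLeaf){
  *pIntKey   = (flagByte & PTF_INTKEY)!=0;    /* keys are 64-bit integers */
  *pZeroData = (flagByte & PTF_ZERODATA)!=0;  /* page carries keys only, no data */
  *pLeafData = (flagByte & PTF_LEAFDATA)!=0;  /* data is stored on leaves only */
  *pLeaf     = (flagByte & PTF_LEAF)!=0;      /* page has no children */
}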
+*/ +struct Btree { + sqlite3 *pSqlite; /* The database connection holding this btree */ + BtShared *pBt; /* Sharable content of this btree */ + u8 inTrans; /* TRANS_NONE, TRANS_READ or TRANS_WRITE */ + u8 sharable; /* True if we can share pBt with other pSqlite */ + u8 locked; /* True if pSqlite currently has pBt locked */ + int wantToLock; /* Number of nested calls to sqlite3BtreeEnter() */ + Btree *pNext; /* List of other sharable Btrees from the same pSqlite */ + Btree *pPrev; /* Back pointer of the same list */ +}; + +/* +** Btree.inTrans may take one of the following values. +** +** If the shared-data extension is enabled, there may be multiple users +** of the Btree structure. At most one of these may open a write transaction, +** but any number may have active read transactions. +*/ +#define TRANS_NONE 0 +#define TRANS_READ 1 +#define TRANS_WRITE 2 + +/* +** An instance of this object represents a single database file. +** +** A single database file can be in use as the same time by two +** or more database connections. When two or more connections are +** sharing the same database file, each connection has it own +** private Btree object for the file and each of those Btrees points +** to this one BtShared object. BtShared.nRef is the number of +** connections currently sharing this database file. +** +** Fields in this structure are accessed under the BtShared.mutex +** mutex, except for nRef and pNext which are accessed under the +** global SQLITE_MUTEX_STATIC_MASTER mutex. The pPager field +** may not be modified once it is initially set as long as nRef>0. +** The pSchema field may be set once under BtShared.mutex and +** thereafter is unchanged as long as nRef>0. +*/ +struct BtShared { + Pager *pPager; /* The page cache */ + BtCursor *pCursor; /* A list of all open cursors */ + MemPage *pPage1; /* First page of the database */ + u8 inStmt; /* True if we are in a statement subtransaction */ + u8 readOnly; /* True if the underlying file is readonly */ + u8 maxEmbedFrac; /* Maximum payload as % of total page size */ + u8 minEmbedFrac; /* Minimum payload as % of total page size */ + u8 minLeafFrac; /* Minimum leaf payload as % of total page size */ + u8 pageSizeFixed; /* True if the page size can no longer be changed */ +#ifndef SQLITE_OMIT_AUTOVACUUM + u8 autoVacuum; /* True if auto-vacuum is enabled */ + u8 incrVacuum; /* True if incr-vacuum is enabled */ + Pgno nTrunc; /* Non-zero if the db will be truncated (incr vacuum) */ +#endif + u16 pageSize; /* Total number of bytes on a page */ + u16 usableSize; /* Number of usable bytes on each page */ + int maxLocal; /* Maximum local payload in non-LEAFDATA tables */ + int minLocal; /* Minimum local payload in non-LEAFDATA tables */ + int maxLeaf; /* Maximum local payload in a LEAFDATA table */ + int minLeaf; /* Minimum local payload in a LEAFDATA table */ + BusyHandler *pBusyHandler; /* Callback for when there is lock contention */ + u8 inTransaction; /* Transaction state */ + int nTransaction; /* Number of open transactions (read + write) */ + void *pSchema; /* Pointer to space allocated by sqlite3BtreeSchema() */ + void (*xFreeSchema)(void*); /* Destructor for BtShared.pSchema */ + sqlite3_mutex *mutex; /* Non-recursive mutex required to access this struct */ +#ifndef SQLITE_OMIT_SHARED_CACHE + int nRef; /* Number of references to this structure */ + BtShared *pNext; /* Next on a list of sharable BtShared structs */ + BtLock *pLock; /* List of locks held on this shared-btree struct */ +#endif +}; + +/* +** An instance of the 
following structure is used to hold information +** about a cell. The parseCellPtr() function fills in this structure +** based on information extract from the raw disk page. +*/ +typedef struct CellInfo CellInfo; +struct CellInfo { + u8 *pCell; /* Pointer to the start of cell content */ + i64 nKey; /* The key for INTKEY tables, or number of bytes in key */ + u32 nData; /* Number of bytes of data */ + u32 nPayload; /* Total amount of payload */ + u16 nHeader; /* Size of the cell content header in bytes */ + u16 nLocal; /* Amount of payload held locally */ + u16 iOverflow; /* Offset to overflow page number. Zero if no overflow */ + u16 nSize; /* Size of the cell content on the main b-tree page */ +}; + +/* +** A cursor is a pointer to a particular entry within a particular +** b-tree within a database file. +** +** The entry is identified by its MemPage and the index in +** MemPage.aCell[] of the entry. +** +** When a single database file can shared by two more database connections, +** but cursors cannot be shared. Each cursor is associated with a +** particular database connection identified BtCursor.pBtree.pSqlite. +** +** Fields in this structure are accessed under the BtShared.mutex +** found at self->pBt->mutex. +*/ +struct BtCursor { + Btree *pBtree; /* The Btree to which this cursor belongs */ + BtShared *pBt; /* The BtShared this cursor points to */ + BtCursor *pNext, *pPrev; /* Forms a linked list of all cursors */ + int (*xCompare)(void*,int,const void*,int,const void*); /* Key comp func */ + void *pArg; /* First arg to xCompare() */ + Pgno pgnoRoot; /* The root page of this tree */ + MemPage *pPage; /* Page that contains the entry */ + int idx; /* Index of the entry in pPage->aCell[] */ + CellInfo info; /* A parse of the cell we are pointing at */ + u8 wrFlag; /* True if writable */ + u8 eState; /* One of the CURSOR_XXX constants (see below) */ + void *pKey; /* Saved key that was cursor's last known position */ + i64 nKey; /* Size of pKey, or last integer key */ + int skip; /* (skip<0) -> Prev() is a no-op. (skip>0) -> Next() is */ +#ifndef SQLITE_OMIT_INCRBLOB + u8 isIncrblobHandle; /* True if this cursor is an incr. io handle */ + Pgno *aOverflow; /* Cache of overflow page locations */ +#endif +}; + +/* +** Potential values for BtCursor.eState. +** +** CURSOR_VALID: +** Cursor points to a valid entry. getPayload() etc. may be called. +** +** CURSOR_INVALID: +** Cursor does not point to a valid entry. This can happen (for example) +** because the table is empty or because BtreeCursorFirst() has not been +** called. +** +** CURSOR_REQUIRESEEK: +** The table that this cursor was opened on still exists, but has been +** modified since the cursor was last used. The cursor position is saved +** in variables BtCursor.pKey and BtCursor.nKey. When a cursor is in +** this state, restoreOrClearCursorPosition() can be called to attempt to +** seek the cursor to the saved position. +** +** CURSOR_FAULT: +** A unrecoverable error (an I/O error or a malloc failure) has occurred +** on a different connection that shares the BtShared cache with this +** cursor. The error has left the cache in an inconsistent state. +** Do nothing else with this cursor. 
Any attempt to use the cursor +** should return the error code stored in BtCursor.skip +*/ +#define CURSOR_INVALID 0 +#define CURSOR_VALID 1 +#define CURSOR_REQUIRESEEK 2 +#define CURSOR_FAULT 3 + +/* +** The TRACE macro will print high-level status information about the +** btree operation when the global variable sqlite3_btree_trace is +** enabled. +*/ +#if SQLITE_TEST +# define TRACE(X) if( sqlite3_btree_trace ){ printf X; fflush(stdout); } +#else +# define TRACE(X) +#endif + +/* +** Routines to read and write variable-length integers. These used to +** be defined locally, but now we use the varint routines in the util.c +** file. +*/ +#define getVarint sqlite3GetVarint +#define getVarint32(A,B) ((*B=*(A))<=0x7f?1:sqlite3GetVarint32(A,B)) +#define putVarint sqlite3PutVarint + +/* The database page the PENDING_BYTE occupies. This page is never used. +** TODO: This macro is very similary to PAGER_MJ_PGNO() in pager.c. They +** should possibly be consolidated (presumably in pager.h). +** +** If disk I/O is omitted (meaning that the database is stored purely +** in memory) then there is no pending byte. +*/ +#ifdef SQLITE_OMIT_DISKIO +# define PENDING_BYTE_PAGE(pBt) 0x7fffffff +#else +# define PENDING_BYTE_PAGE(pBt) ((PENDING_BYTE/(pBt)->pageSize)+1) +#endif + +/* +** A linked list of the following structures is stored at BtShared.pLock. +** Locks are added (or upgraded from READ_LOCK to WRITE_LOCK) when a cursor +** is opened on the table with root page BtShared.iTable. Locks are removed +** from this list when a transaction is committed or rolled back, or when +** a btree handle is closed. +*/ +struct BtLock { + Btree *pBtree; /* Btree handle holding this lock */ + Pgno iTable; /* Root page of table */ + u8 eLock; /* READ_LOCK or WRITE_LOCK */ + BtLock *pNext; /* Next in BtShared.pLock list */ +}; + +/* Candidate values for BtLock.eLock */ +#define READ_LOCK 1 +#define WRITE_LOCK 2 + +/* +** These macros define the location of the pointer-map entry for a +** database page. The first argument to each is the number of usable +** bytes on each page of the database (often 1024). The second is the +** page number to look up in the pointer map. +** +** PTRMAP_PAGENO returns the database page number of the pointer-map +** page that stores the required pointer. PTRMAP_PTROFFSET returns +** the offset of the requested map entry. +** +** If the pgno argument passed to PTRMAP_PAGENO is a pointer-map page, +** then pgno is returned. So (pgno==PTRMAP_PAGENO(pgsz, pgno)) can be +** used to test if pgno is a pointer-map page. PTRMAP_ISPAGE implements +** this test. +*/ +#define PTRMAP_PAGENO(pBt, pgno) ptrmapPageno(pBt, pgno) +#define PTRMAP_PTROFFSET(pBt, pgno) (5*(pgno-ptrmapPageno(pBt, pgno)-1)) +#define PTRMAP_ISPAGE(pBt, pgno) (PTRMAP_PAGENO((pBt),(pgno))==(pgno)) + +/* +** The pointer map is a lookup table that identifies the parent page for +** each child page in the database file. The parent page is the page that +** contains a pointer to the child. Every page in the database contains +** 0 or 1 parent pages. (In this context 'database page' refers +** to any page that is not part of the pointer map itself.) Each pointer map +** entry consists of a single byte 'type' and a 4 byte parent page number. +** The PTRMAP_XXX identifiers below are the valid types. +** +** The purpose of the pointer map is to facility moving pages from one +** position in the file to another as part of autovacuum. When a page +** is moved, the pointer in its parent must be updated to point to the +** new location. 
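To make the pointer-map arithmetic above concrete: each entry is 5 bytes (one type byte plus a 4-byte parent page number), so one map page covers usableSize/5 following pages and the first map page is page 2. The sketch below mirrors what ptrmapPageno() and PTRMAP_PTROFFSET() compute, with the adjustment for the locking page omitted; the names are illustrative.

static unsigned int demoPtrmapPageno(unsigned int usableSize, unsigned int pgno){
  unsigned int nEntry = usableSize/5;          /* entries held by one pointer-map page */
  unsigned int iGroup = (pgno-2)/(nEntry+1);   /* each group: one map page + nEntry mapped pages */
  return iGroup*(nEntry+1) + 2;                /* page number of the governing map page */
}

static unsigned int demoPtrmapOffset(unsigned int usableSize, unsigned int pgno){
  return 5*(pgno - demoPtrmapPageno(usableSize, pgno) - 1);   /* byte offset of the 5-byte entry */
}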
The pointer map is used to locate the parent page quickly. +** +** PTRMAP_ROOTPAGE: The database page is a root-page. The page-number is not +** used in this case. +** +** PTRMAP_FREEPAGE: The database page is an unused (free) page. The page-number +** is not used in this case. +** +** PTRMAP_OVERFLOW1: The database page is the first page in a list of +** overflow pages. The page number identifies the page that +** contains the cell with a pointer to this overflow page. +** +** PTRMAP_OVERFLOW2: The database page is the second or later page in a list of +** overflow pages. The page-number identifies the previous +** page in the overflow page list. +** +** PTRMAP_BTREE: The database page is a non-root btree page. The page number +** identifies the parent page in the btree. +*/ +#define PTRMAP_ROOTPAGE 1 +#define PTRMAP_FREEPAGE 2 +#define PTRMAP_OVERFLOW1 3 +#define PTRMAP_OVERFLOW2 4 +#define PTRMAP_BTREE 5 + +/* A bunch of assert() statements to check the transaction state variables +** of handle p (type Btree*) are internally consistent. +*/ +#define btreeIntegrity(p) \ + assert( p->pBt->inTransaction!=TRANS_NONE || p->pBt->nTransaction==0 ); \ + assert( p->pBt->inTransaction>=p->inTrans ); + + +/* +** The ISAUTOVACUUM macro is used within balance_nonroot() to determine +** if the database supports auto-vacuum or not. Because it is used +** within an expression that is an argument to another macro +** (sqliteMallocRaw), it is not possible to use conditional compilation. +** So, this macro is defined instead. +*/ +#ifndef SQLITE_OMIT_AUTOVACUUM +#define ISAUTOVACUUM (pBt->autoVacuum) +#else +#define ISAUTOVACUUM 0 +#endif + + +/* +** This structure is passed around through all the sanity checking routines +** in order to keep track of some global state information. +*/ +typedef struct IntegrityCk IntegrityCk; +struct IntegrityCk { + BtShared *pBt; /* The tree being checked out */ + Pager *pPager; /* The associated pager. Also accessible by pBt->pPager */ + int nPage; /* Number of pages in the database */ + int *anRef; /* Number of times each page is referenced */ + int mxErr; /* Stop accumulating errors when this reaches zero */ + char *zErrMsg; /* An error message. NULL if no errors seen. */ + int nErr; /* Number of messages written to zErrMsg so far */ +}; + +/* +** Read or write a two- and four-byte big-endian integer values. +*/ +#define get2byte(x) ((x)[0]<<8 | (x)[1]) +#define put2byte(p,v) ((p)[0] = (v)>>8, (p)[1] = (v)) +#define get4byte sqlite3Get4byte +#define put4byte sqlite3Put4byte + +/* +** Internal routines that should be accessed by the btree layer only. +*/ +int sqlite3BtreeGetPage(BtShared*, Pgno, MemPage**, int); +int sqlite3BtreeInitPage(MemPage *pPage, MemPage *pParent); +void sqlite3BtreeParseCellPtr(MemPage*, u8*, CellInfo*); +void sqlite3BtreeParseCell(MemPage*, int, CellInfo*); +#ifdef SQLITE_TEST +u8 *sqlite3BtreeFindCell(MemPage *pPage, int iCell); +#endif +int sqlite3BtreeRestoreOrClearCursorPosition(BtCursor *pCur); +void sqlite3BtreeGetTempCursor(BtCursor *pCur, BtCursor *pTempCur); +void sqlite3BtreeReleaseTempCursor(BtCursor *pCur); +int sqlite3BtreeIsRootPage(MemPage *pPage); +void sqlite3BtreeMoveToParent(BtCursor *pCur); diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/build.c b/libraries/sqlite/unix/sqlite-3.5.1/src/build.c new file mode 100644 index 0000000000..f46d28ee1e --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/build.c @@ -0,0 +1,3409 @@ +/* +** 2001 September 15 +** +** The author disclaims copyright to this source code. 
In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains C code routines that are called by the SQLite parser +** when syntax rules are reduced. The routines in this file handle the +** following kinds of SQL syntax: +** +** CREATE TABLE +** DROP TABLE +** CREATE INDEX +** DROP INDEX +** creating ID lists +** BEGIN TRANSACTION +** COMMIT +** ROLLBACK +** +** $Id: build.c,v 1.444 2007/09/03 15:19:35 drh Exp $ +*/ +#include "sqliteInt.h" +#include + +/* +** This routine is called when a new SQL statement is beginning to +** be parsed. Initialize the pParse structure as needed. +*/ +void sqlite3BeginParse(Parse *pParse, int explainFlag){ + pParse->explain = explainFlag; + pParse->nVar = 0; +} + +#ifndef SQLITE_OMIT_SHARED_CACHE +/* +** The TableLock structure is only used by the sqlite3TableLock() and +** codeTableLocks() functions. +*/ +struct TableLock { + int iDb; /* The database containing the table to be locked */ + int iTab; /* The root page of the table to be locked */ + u8 isWriteLock; /* True for write lock. False for a read lock */ + const char *zName; /* Name of the table */ +}; + +/* +** Record the fact that we want to lock a table at run-time. +** +** The table to be locked has root page iTab and is found in database iDb. +** A read or a write lock can be taken depending on isWritelock. +** +** This routine just records the fact that the lock is desired. The +** code to make the lock occur is generated by a later call to +** codeTableLocks() which occurs during sqlite3FinishCoding(). +*/ +void sqlite3TableLock( + Parse *pParse, /* Parsing context */ + int iDb, /* Index of the database containing the table to lock */ + int iTab, /* Root page number of the table to be locked */ + u8 isWriteLock, /* True for a write lock */ + const char *zName /* Name of the table to be locked */ +){ + int i; + int nBytes; + TableLock *p; + + if( iDb<0 ){ + return; + } + + for(i=0; inTableLock; i++){ + p = &pParse->aTableLock[i]; + if( p->iDb==iDb && p->iTab==iTab ){ + p->isWriteLock = (p->isWriteLock || isWriteLock); + return; + } + } + + nBytes = sizeof(TableLock) * (pParse->nTableLock+1); + pParse->aTableLock = + sqlite3DbReallocOrFree(pParse->db, pParse->aTableLock, nBytes); + if( pParse->aTableLock ){ + p = &pParse->aTableLock[pParse->nTableLock++]; + p->iDb = iDb; + p->iTab = iTab; + p->isWriteLock = isWriteLock; + p->zName = zName; + }else{ + pParse->nTableLock = 0; + pParse->db->mallocFailed = 1; + } +} + +/* +** Code an OP_TableLock instruction for each table locked by the +** statement (configured by calls to sqlite3TableLock()). +*/ +static void codeTableLocks(Parse *pParse){ + int i; + Vdbe *pVdbe; + + if( 0==(pVdbe = sqlite3GetVdbe(pParse)) ){ + return; + } + + for(i=0; inTableLock; i++){ + TableLock *p = &pParse->aTableLock[i]; + int p1 = p->iDb; + if( p->isWriteLock ){ + p1 = -1*(p1+1); + } + sqlite3VdbeOp3(pVdbe, OP_TableLock, p1, p->iTab, p->zName, P3_STATIC); + } +} +#else + #define codeTableLocks(x) +#endif + +/* +** This routine is called after a single SQL statement has been +** parsed and a VDBE program to execute that statement has been +** prepared. This routine puts the finishing touches on the +** VDBE program and resets the pParse structure for the next +** parse. 
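The sign trick in codeTableLocks() above packs both the database index and the lock type into the single P1 operand of OP_TableLock. A small sketch of the encoding and its inverse; the helper names are illustrative.

static int demoEncodeTableLockP1(int iDb, int isWriteLock){
  return isWriteLock ? -1*(iDb+1) : iDb;       /* write locks travel as negative values */
}

static void demoDecodeTableLockP1(int p1, int *piDb, int *pIsWriteLock){
  *pIsWriteLock = (p1<0);
  *piDb = (p1<0) ? (-1*p1)-1 : p1;             /* undo the encoding above */
}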
+** +** Note that if an error occurred, it might be the case that +** no VDBE code was generated. +*/ +void sqlite3FinishCoding(Parse *pParse){ + sqlite3 *db; + Vdbe *v; + + db = pParse->db; + if( db->mallocFailed ) return; + if( pParse->nested ) return; + if( !pParse->pVdbe ){ + if( pParse->rc==SQLITE_OK && pParse->nErr ){ + pParse->rc = SQLITE_ERROR; + return; + } + } + + /* Begin by generating some termination code at the end of the + ** vdbe program + */ + v = sqlite3GetVdbe(pParse); + if( v ){ + sqlite3VdbeAddOp(v, OP_Halt, 0, 0); + + /* The cookie mask contains one bit for each database file open. + ** (Bit 0 is for main, bit 1 is for temp, and so forth.) Bits are + ** set for each database that is used. Generate code to start a + ** transaction on each used database and to verify the schema cookie + ** on each used database. + */ + if( pParse->cookieGoto>0 ){ + u32 mask; + int iDb; + sqlite3VdbeJumpHere(v, pParse->cookieGoto-1); + for(iDb=0, mask=1; iDbnDb; mask<<=1, iDb++){ + if( (mask & pParse->cookieMask)==0 ) continue; + sqlite3VdbeUsesBtree(v, iDb); + sqlite3VdbeAddOp(v, OP_Transaction, iDb, (mask & pParse->writeMask)!=0); + sqlite3VdbeAddOp(v, OP_VerifyCookie, iDb, pParse->cookieValue[iDb]); + } +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( pParse->pVirtualLock ){ + char *vtab = (char *)pParse->pVirtualLock->pVtab; + sqlite3VdbeOp3(v, OP_VBegin, 0, 0, vtab, P3_VTAB); + } +#endif + + /* Once all the cookies have been verified and transactions opened, + ** obtain the required table-locks. This is a no-op unless the + ** shared-cache feature is enabled. + */ + codeTableLocks(pParse); + sqlite3VdbeAddOp(v, OP_Goto, 0, pParse->cookieGoto); + } + +#ifndef SQLITE_OMIT_TRACE + /* Add a No-op that contains the complete text of the compiled SQL + ** statement as its P3 argument. This does not change the functionality + ** of the program. + ** + ** This is used to implement sqlite3_trace(). + */ + sqlite3VdbeOp3(v, OP_Noop, 0, 0, pParse->zSql, pParse->zTail-pParse->zSql); +#endif /* SQLITE_OMIT_TRACE */ + } + + + /* Get the VDBE program ready for execution + */ + if( v && pParse->nErr==0 && !db->mallocFailed ){ +#ifdef SQLITE_DEBUG + FILE *trace = (db->flags & SQLITE_VdbeTrace)!=0 ? stdout : 0; + sqlite3VdbeTrace(v, trace); +#endif + sqlite3VdbeMakeReady(v, pParse->nVar, pParse->nMem+3, + pParse->nTab+3, pParse->explain); + pParse->rc = SQLITE_DONE; + pParse->colNamesSet = 0; + }else if( pParse->rc==SQLITE_OK ){ + pParse->rc = SQLITE_ERROR; + } + pParse->nTab = 0; + pParse->nMem = 0; + pParse->nSet = 0; + pParse->nVar = 0; + pParse->cookieMask = 0; + pParse->cookieGoto = 0; +} + +/* +** Run the parser and code generator recursively in order to generate +** code for the SQL statement given onto the end of the pParse context +** currently under construction. When the parser is run recursively +** this way, the final OP_Halt is not appended and other initialization +** and finalization steps are omitted because those are handling by the +** outermost parser. +** +** Not everything is nestable. This facility is designed to permit +** INSERT, UPDATE, and DELETE operations against SQLITE_MASTER. Use +** care if you decide to try to use this routine for some other purposes. 
+*/ +void sqlite3NestedParse(Parse *pParse, const char *zFormat, ...){ + va_list ap; + char *zSql; +# define SAVE_SZ (sizeof(Parse) - offsetof(Parse,nVar)) + char saveBuf[SAVE_SZ]; + + if( pParse->nErr ) return; + assert( pParse->nested<10 ); /* Nesting should only be of limited depth */ + va_start(ap, zFormat); + zSql = sqlite3VMPrintf(pParse->db, zFormat, ap); + va_end(ap); + if( zSql==0 ){ + pParse->db->mallocFailed = 1; + return; /* A malloc must have failed */ + } + pParse->nested++; + memcpy(saveBuf, &pParse->nVar, SAVE_SZ); + memset(&pParse->nVar, 0, SAVE_SZ); + sqlite3RunParser(pParse, zSql, 0); + sqlite3_free(zSql); + memcpy(&pParse->nVar, saveBuf, SAVE_SZ); + pParse->nested--; +} + +/* +** Locate the in-memory structure that describes a particular database +** table given the name of that table and (optionally) the name of the +** database containing the table. Return NULL if not found. +** +** If zDatabase is 0, all databases are searched for the table and the +** first matching table is returned. (No checking for duplicate table +** names is done.) The search order is TEMP first, then MAIN, then any +** auxiliary databases added using the ATTACH command. +** +** See also sqlite3LocateTable(). +*/ +Table *sqlite3FindTable(sqlite3 *db, const char *zName, const char *zDatabase){ + Table *p = 0; + int i; + assert( zName!=0 ); + for(i=OMIT_TEMPDB; inDb; i++){ + int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */ + if( zDatabase!=0 && sqlite3StrICmp(zDatabase, db->aDb[j].zName) ) continue; + p = sqlite3HashFind(&db->aDb[j].pSchema->tblHash, zName, strlen(zName)+1); + if( p ) break; + } + return p; +} + +/* +** Locate the in-memory structure that describes a particular database +** table given the name of that table and (optionally) the name of the +** database containing the table. Return NULL if not found. Also leave an +** error message in pParse->zErrMsg. +** +** The difference between this routine and sqlite3FindTable() is that this +** routine leaves an error message in pParse->zErrMsg where +** sqlite3FindTable() does not. +*/ +Table *sqlite3LocateTable(Parse *pParse, const char *zName, const char *zDbase){ + Table *p; + + /* Read the database schema. If an error occurs, leave an error message + ** and code in pParse and return NULL. */ + if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ + return 0; + } + + p = sqlite3FindTable(pParse->db, zName, zDbase); + if( p==0 ){ + if( zDbase ){ + sqlite3ErrorMsg(pParse, "no such table: %s.%s", zDbase, zName); + }else{ + sqlite3ErrorMsg(pParse, "no such table: %s", zName); + } + pParse->checkSchema = 1; + } + return p; +} + +/* +** Locate the in-memory structure that describes +** a particular index given the name of that index +** and the name of the database that contains the index. +** Return NULL if not found. +** +** If zDatabase is 0, all databases are searched for the +** table and the first matching index is returned. (No checking +** for duplicate index names is done.) The search order is +** TEMP first, then MAIN, then any auxiliary databases added +** using the ATTACH command. +*/ +Index *sqlite3FindIndex(sqlite3 *db, const char *zName, const char *zDb){ + Index *p = 0; + int i; + for(i=OMIT_TEMPDB; inDb; i++){ + int j = (i<2) ? 
i^1 : i; /* Search TEMP before MAIN */ + Schema *pSchema = db->aDb[j].pSchema; + if( zDb && sqlite3StrICmp(zDb, db->aDb[j].zName) ) continue; + assert( pSchema || (j==1 && !db->aDb[1].pBt) ); + if( pSchema ){ + p = sqlite3HashFind(&pSchema->idxHash, zName, strlen(zName)+1); + } + if( p ) break; + } + return p; +} + +/* +** Reclaim the memory used by an index +*/ +static void freeIndex(Index *p){ + sqlite3_free(p->zColAff); + sqlite3_free(p); +} + +/* +** Remove the given index from the index hash table, and free +** its memory structures. +** +** The index is removed from the database hash tables but +** it is not unlinked from the Table that it indexes. +** Unlinking from the Table must be done by the calling function. +*/ +static void sqliteDeleteIndex(Index *p){ + Index *pOld; + const char *zName = p->zName; + + pOld = sqlite3HashInsert(&p->pSchema->idxHash, zName, strlen( zName)+1, 0); + assert( pOld==0 || pOld==p ); + freeIndex(p); +} + +/* +** For the index called zIdxName which is found in the database iDb, +** unlike that index from its Table then remove the index from +** the index hash table and free all memory structures associated +** with the index. +*/ +void sqlite3UnlinkAndDeleteIndex(sqlite3 *db, int iDb, const char *zIdxName){ + Index *pIndex; + int len; + Hash *pHash = &db->aDb[iDb].pSchema->idxHash; + + len = strlen(zIdxName); + pIndex = sqlite3HashInsert(pHash, zIdxName, len+1, 0); + if( pIndex ){ + if( pIndex->pTable->pIndex==pIndex ){ + pIndex->pTable->pIndex = pIndex->pNext; + }else{ + Index *p; + for(p=pIndex->pTable->pIndex; p && p->pNext!=pIndex; p=p->pNext){} + if( p && p->pNext==pIndex ){ + p->pNext = pIndex->pNext; + } + } + freeIndex(pIndex); + } + db->flags |= SQLITE_InternChanges; +} + +/* +** Erase all schema information from the in-memory hash tables of +** a single database. This routine is called to reclaim memory +** before the database closes. It is also called during a rollback +** if there were schema changes during the transaction or if a +** schema-cookie mismatch occurs. +** +** If iDb<=0 then reset the internal schema tables for all database +** files. If iDb>=2 then reset the internal schema for only the +** single file indicated. +*/ +void sqlite3ResetInternalSchema(sqlite3 *db, int iDb){ + int i, j; + + assert( iDb>=0 && iDbnDb ); + for(i=iDb; inDb; i++){ + Db *pDb = &db->aDb[i]; + if( pDb->pSchema ){ + sqlite3SchemaFree(pDb->pSchema); + } + if( iDb>0 ) return; + } + assert( iDb==0 ); + db->flags &= ~SQLITE_InternChanges; + + /* If one or more of the auxiliary database files has been closed, + ** then remove them from the auxiliary database list. We take the + ** opportunity to do this here since we have just deleted all of the + ** schema hash tables and therefore do not have to make any changes + ** to any of those tables. + */ + for(i=0; inDb; i++){ + struct Db *pDb = &db->aDb[i]; + if( pDb->pBt==0 ){ + if( pDb->pAux && pDb->xFreeAux ) pDb->xFreeAux(pDb->pAux); + pDb->pAux = 0; + } + } + for(i=j=2; inDb; i++){ + struct Db *pDb = &db->aDb[i]; + if( pDb->pBt==0 ){ + sqlite3_free(pDb->zName); + pDb->zName = 0; + continue; + } + if( jaDb[j] = db->aDb[i]; + } + j++; + } + memset(&db->aDb[j], 0, (db->nDb-j)*sizeof(db->aDb[j])); + db->nDb = j; + if( db->nDb<=2 && db->aDb!=db->aDbStatic ){ + memcpy(db->aDbStatic, db->aDb, 2*sizeof(db->aDb[0])); + sqlite3_free(db->aDb); + db->aDb = db->aDbStatic; + } +} + +/* +** This routine is called when a commit occurs. 
+*/ +void sqlite3CommitInternalChanges(sqlite3 *db){ + db->flags &= ~SQLITE_InternChanges; +} + +/* +** Clear the column names from a table or view. +*/ +static void sqliteResetColumnNames(Table *pTable){ + int i; + Column *pCol; + assert( pTable!=0 ); + if( (pCol = pTable->aCol)!=0 ){ + for(i=0; inCol; i++, pCol++){ + sqlite3_free(pCol->zName); + sqlite3ExprDelete(pCol->pDflt); + sqlite3_free(pCol->zType); + sqlite3_free(pCol->zColl); + } + sqlite3_free(pTable->aCol); + } + pTable->aCol = 0; + pTable->nCol = 0; +} + +/* +** Remove the memory data structures associated with the given +** Table. No changes are made to disk by this routine. +** +** This routine just deletes the data structure. It does not unlink +** the table data structure from the hash table. Nor does it remove +** foreign keys from the sqlite.aFKey hash table. But it does destroy +** memory structures of the indices and foreign keys associated with +** the table. +*/ +void sqlite3DeleteTable(Table *pTable){ + Index *pIndex, *pNext; + FKey *pFKey, *pNextFKey; + + if( pTable==0 ) return; + + /* Do not delete the table until the reference count reaches zero. */ + pTable->nRef--; + if( pTable->nRef>0 ){ + return; + } + assert( pTable->nRef==0 ); + + /* Delete all indices associated with this table + */ + for(pIndex = pTable->pIndex; pIndex; pIndex=pNext){ + pNext = pIndex->pNext; + assert( pIndex->pSchema==pTable->pSchema ); + sqliteDeleteIndex(pIndex); + } + +#ifndef SQLITE_OMIT_FOREIGN_KEY + /* Delete all foreign keys associated with this table. The keys + ** should have already been unlinked from the pSchema->aFKey hash table + */ + for(pFKey=pTable->pFKey; pFKey; pFKey=pNextFKey){ + pNextFKey = pFKey->pNextFrom; + assert( sqlite3HashFind(&pTable->pSchema->aFKey, + pFKey->zTo, strlen(pFKey->zTo)+1)!=pFKey ); + sqlite3_free(pFKey); + } +#endif + + /* Delete the Table structure itself. + */ + sqliteResetColumnNames(pTable); + sqlite3_free(pTable->zName); + sqlite3_free(pTable->zColAff); + sqlite3SelectDelete(pTable->pSelect); +#ifndef SQLITE_OMIT_CHECK + sqlite3ExprDelete(pTable->pCheck); +#endif + sqlite3VtabClear(pTable); + sqlite3_free(pTable); +} + +/* +** Unlink the given table from the hash tables and the delete the +** table structure with all its indices and foreign keys. +*/ +void sqlite3UnlinkAndDeleteTable(sqlite3 *db, int iDb, const char *zTabName){ + Table *p; + FKey *pF1, *pF2; + Db *pDb; + + assert( db!=0 ); + assert( iDb>=0 && iDbnDb ); + assert( zTabName && zTabName[0] ); + pDb = &db->aDb[iDb]; + p = sqlite3HashInsert(&pDb->pSchema->tblHash, zTabName, strlen(zTabName)+1,0); + if( p ){ +#ifndef SQLITE_OMIT_FOREIGN_KEY + for(pF1=p->pFKey; pF1; pF1=pF1->pNextFrom){ + int nTo = strlen(pF1->zTo) + 1; + pF2 = sqlite3HashFind(&pDb->pSchema->aFKey, pF1->zTo, nTo); + if( pF2==pF1 ){ + sqlite3HashInsert(&pDb->pSchema->aFKey, pF1->zTo, nTo, pF1->pNextTo); + }else{ + while( pF2 && pF2->pNextTo!=pF1 ){ pF2=pF2->pNextTo; } + if( pF2 ){ + pF2->pNextTo = pF1->pNextTo; + } + } + } +#endif + sqlite3DeleteTable(p); + } + db->flags |= SQLITE_InternChanges; +} + +/* +** Given a token, return a string that consists of the text of that +** token with any quotations removed. Space to hold the returned string +** is obtained from sqliteMalloc() and must be freed by the calling +** function. +** +** Tokens are often just pointers into the original SQL text and so +** are not \000 terminated and are not persistent. The returned string +** is \000 terminated and is persistent. 
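Among the name-handling helpers defined just below, sqlite3CheckObjectName() rejects ordinary schema objects whose names begin with "sqlite_". A small demo of that behaviour using only the public API (sqlite3.h, link with -lsqlite3); nothing internal is touched.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  sqlite3_open(":memory:", &db);
  if( sqlite3_exec(db, "CREATE TABLE sqlite_mine(x)", 0, 0, &zErr)!=SQLITE_OK ){
    /* Expected: "object name reserved for internal use: sqlite_mine" */
    printf("error: %s\n", zErr);
    sqlite3_free(zErr);
  }
  sqlite3_close(db);
  return 0;
}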
+*/ +char *sqlite3NameFromToken(sqlite3 *db, Token *pName){ + char *zName; + if( pName ){ + zName = sqlite3DbStrNDup(db, (char*)pName->z, pName->n); + sqlite3Dequote(zName); + }else{ + zName = 0; + } + return zName; +} + +/* +** Open the sqlite_master table stored in database number iDb for +** writing. The table is opened using cursor 0. +*/ +void sqlite3OpenMasterTable(Parse *p, int iDb){ + Vdbe *v = sqlite3GetVdbe(p); + sqlite3TableLock(p, iDb, MASTER_ROOT, 1, SCHEMA_TABLE(iDb)); + sqlite3VdbeAddOp(v, OP_Integer, iDb, 0); + sqlite3VdbeAddOp(v, OP_OpenWrite, 0, MASTER_ROOT); + sqlite3VdbeAddOp(v, OP_SetNumColumns, 0, 5); /* sqlite_master has 5 columns */ +} + +/* +** The token *pName contains the name of a database (either "main" or +** "temp" or the name of an attached db). This routine returns the +** index of the named database in db->aDb[], or -1 if the named db +** does not exist. +*/ +int sqlite3FindDb(sqlite3 *db, Token *pName){ + int i = -1; /* Database number */ + int n; /* Number of characters in the name */ + Db *pDb; /* A database whose name space is being searched */ + char *zName; /* Name we are searching for */ + + zName = sqlite3NameFromToken(db, pName); + if( zName ){ + n = strlen(zName); + for(i=(db->nDb-1), pDb=&db->aDb[i]; i>=0; i--, pDb--){ + if( (!OMIT_TEMPDB || i!=1 ) && n==strlen(pDb->zName) && + 0==sqlite3StrICmp(pDb->zName, zName) ){ + break; + } + } + sqlite3_free(zName); + } + return i; +} + +/* The table or view or trigger name is passed to this routine via tokens +** pName1 and pName2. If the table name was fully qualified, for example: +** +** CREATE TABLE xxx.yyy (...); +** +** Then pName1 is set to "xxx" and pName2 "yyy". On the other hand if +** the table name is not fully qualified, i.e.: +** +** CREATE TABLE yyy(...); +** +** Then pName1 is set to "yyy" and pName2 is "". +** +** This routine sets the *ppUnqual pointer to point at the token (pName1 or +** pName2) that stores the unqualified table name. The index of the +** database "xxx" is returned. +*/ +int sqlite3TwoPartName( + Parse *pParse, /* Parsing and code generating context */ + Token *pName1, /* The "xxx" in the name "xxx.yyy" or "xxx" */ + Token *pName2, /* The "yyy" in the name "xxx.yyy" */ + Token **pUnqual /* Write the unqualified object name here */ +){ + int iDb; /* Database holding the object */ + sqlite3 *db = pParse->db; + + if( pName2 && pName2->n>0 ){ + assert( !db->init.busy ); + *pUnqual = pName2; + iDb = sqlite3FindDb(db, pName1); + if( iDb<0 ){ + sqlite3ErrorMsg(pParse, "unknown database %T", pName1); + pParse->nErr++; + return -1; + } + }else{ + assert( db->init.iDb==0 || db->init.busy ); + iDb = db->init.iDb; + *pUnqual = pName1; + } + return iDb; +} + +/* +** This routine is used to check if the UTF-8 string zName is a legal +** unqualified name for a new schema object (table, index, view or +** trigger). All names are legal except those that begin with the string +** "sqlite_" (in upper, lower or mixed case). This portion of the namespace +** is reserved for internal use. +*/ +int sqlite3CheckObjectName(Parse *pParse, const char *zName){ + if( !pParse->db->init.busy && pParse->nested==0 + && (pParse->db->flags & SQLITE_WriteSchema)==0 + && 0==sqlite3StrNICmp(zName, "sqlite_", 7) ){ + sqlite3ErrorMsg(pParse, "object name reserved for internal use: %s", zName); + return SQLITE_ERROR; + } + return SQLITE_OK; +} + +/* +** Begin constructing a new table representation in memory. 
This is +** the first of several action routines that get called in response +** to a CREATE TABLE statement. In particular, this routine is called +** after seeing tokens "CREATE" and "TABLE" and the table name. The isTemp +** flag is true if the table should be stored in the auxiliary database +** file instead of in the main database file. This is normally the case +** when the "TEMP" or "TEMPORARY" keyword occurs in between +** CREATE and TABLE. +** +** The new table record is initialized and put in pParse->pNewTable. +** As more of the CREATE TABLE statement is parsed, additional action +** routines will be called to add more information to this record. +** At the end of the CREATE TABLE statement, the sqlite3EndTable() routine +** is called to complete the construction of the new table record. +*/ +void sqlite3StartTable( + Parse *pParse, /* Parser context */ + Token *pName1, /* First part of the name of the table or view */ + Token *pName2, /* Second part of the name of the table or view */ + int isTemp, /* True if this is a TEMP table */ + int isView, /* True if this is a VIEW */ + int isVirtual, /* True if this is a VIRTUAL table */ + int noErr /* Do nothing if table already exists */ +){ + Table *pTable; + char *zName = 0; /* The name of the new table */ + sqlite3 *db = pParse->db; + Vdbe *v; + int iDb; /* Database number to create the table in */ + Token *pName; /* Unqualified name of the table to create */ + + /* The table or view name to create is passed to this routine via tokens + ** pName1 and pName2. If the table name was fully qualified, for example: + ** + ** CREATE TABLE xxx.yyy (...); + ** + ** Then pName1 is set to "xxx" and pName2 "yyy". On the other hand if + ** the table name is not fully qualified, i.e.: + ** + ** CREATE TABLE yyy(...); + ** + ** Then pName1 is set to "yyy" and pName2 is "". + ** + ** The call below sets the pName pointer to point at the token (pName1 or + ** pName2) that stores the unqualified table name. The variable iDb is + ** set to the index of the database that the table or view is to be + ** created in. + */ + iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pName); + if( iDb<0 ) return; + if( !OMIT_TEMPDB && isTemp && iDb>1 ){ + /* If creating a temp table, the name may not be qualified */ + sqlite3ErrorMsg(pParse, "temporary table name must be unqualified"); + return; + } + if( !OMIT_TEMPDB && isTemp ) iDb = 1; + + pParse->sNameToken = *pName; + zName = sqlite3NameFromToken(db, pName); + if( zName==0 ) return; + if( SQLITE_OK!=sqlite3CheckObjectName(pParse, zName) ){ + goto begin_table_error; + } + if( db->init.iDb==1 ) isTemp = 1; +#ifndef SQLITE_OMIT_AUTHORIZATION + assert( (isTemp & 1)==isTemp ); + { + int code; + char *zDb = db->aDb[iDb].zName; + if( sqlite3AuthCheck(pParse, SQLITE_INSERT, SCHEMA_TABLE(isTemp), 0, zDb) ){ + goto begin_table_error; + } + if( isView ){ + if( !OMIT_TEMPDB && isTemp ){ + code = SQLITE_CREATE_TEMP_VIEW; + }else{ + code = SQLITE_CREATE_VIEW; + } + }else{ + if( !OMIT_TEMPDB && isTemp ){ + code = SQLITE_CREATE_TEMP_TABLE; + }else{ + code = SQLITE_CREATE_TABLE; + } + } + if( !isVirtual && sqlite3AuthCheck(pParse, code, zName, 0, zDb) ){ + goto begin_table_error; + } + } +#endif + + /* Make sure the new table name does not collide with an existing + ** index or table name in the same database. Issue an error message if + ** it does. The exception is if the statement being parsed was passed + ** to an sqlite3_declare_vtab() call. 
In that case only the column names + ** and types will be used, so there is no need to test for namespace + ** collisions. + */ + if( !IN_DECLARE_VTAB ){ + if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ + goto begin_table_error; + } + pTable = sqlite3FindTable(db, zName, db->aDb[iDb].zName); + if( pTable ){ + if( !noErr ){ + sqlite3ErrorMsg(pParse, "table %T already exists", pName); + } + goto begin_table_error; + } + if( sqlite3FindIndex(db, zName, 0)!=0 && (iDb==0 || !db->init.busy) ){ + sqlite3ErrorMsg(pParse, "there is already an index named %s", zName); + goto begin_table_error; + } + } + + pTable = sqlite3DbMallocZero(db, sizeof(Table)); + if( pTable==0 ){ + db->mallocFailed = 1; + pParse->rc = SQLITE_NOMEM; + pParse->nErr++; + goto begin_table_error; + } + pTable->zName = zName; + pTable->iPKey = -1; + pTable->pSchema = db->aDb[iDb].pSchema; + pTable->nRef = 1; + if( pParse->pNewTable ) sqlite3DeleteTable(pParse->pNewTable); + pParse->pNewTable = pTable; + + /* If this is the magic sqlite_sequence table used by autoincrement, + ** then record a pointer to this table in the main database structure + ** so that INSERT can find the table easily. + */ +#ifndef SQLITE_OMIT_AUTOINCREMENT + if( !pParse->nested && strcmp(zName, "sqlite_sequence")==0 ){ + pTable->pSchema->pSeqTab = pTable; + } +#endif + + /* Begin generating the code that will insert the table record into + ** the SQLITE_MASTER table. Note in particular that we must go ahead + ** and allocate the record number for the table entry now. Before any + ** PRIMARY KEY or UNIQUE keywords are parsed. Those keywords will cause + ** indices to be created and the table record must come before the + ** indices. Hence, the record number for the table must be allocated + ** now. + */ + if( !db->init.busy && (v = sqlite3GetVdbe(pParse))!=0 ){ + int lbl; + int fileFormat; + sqlite3BeginWriteOperation(pParse, 0, iDb); + +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( isVirtual ){ + sqlite3VdbeAddOp(v, OP_VBegin, 0, 0); + } +#endif + + /* If the file format and encoding in the database have not been set, + ** set them now. + */ + sqlite3VdbeAddOp(v, OP_ReadCookie, iDb, 1); /* file_format */ + sqlite3VdbeUsesBtree(v, iDb); + lbl = sqlite3VdbeMakeLabel(v); + sqlite3VdbeAddOp(v, OP_If, 0, lbl); + fileFormat = (db->flags & SQLITE_LegacyFileFmt)!=0 ? + 1 : SQLITE_MAX_FILE_FORMAT; + sqlite3VdbeAddOp(v, OP_Integer, fileFormat, 0); + sqlite3VdbeAddOp(v, OP_SetCookie, iDb, 1); + sqlite3VdbeAddOp(v, OP_Integer, ENC(db), 0); + sqlite3VdbeAddOp(v, OP_SetCookie, iDb, 4); + sqlite3VdbeResolveLabel(v, lbl); + + /* This just creates a place-holder record in the sqlite_master table. + ** The record created does not contain anything yet. It will be replaced + ** by the real entry in code generated at sqlite3EndTable(). + ** + ** The rowid for the new entry is left on the top of the stack. + ** The rowid value is needed by the code that sqlite3EndTable will + ** generate. + */ +#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) + if( isView || isVirtual ){ + sqlite3VdbeAddOp(v, OP_Integer, 0, 0); + }else +#endif + { + sqlite3VdbeAddOp(v, OP_CreateTable, iDb, 0); + } + sqlite3OpenMasterTable(pParse, iDb); + sqlite3VdbeAddOp(v, OP_NewRowid, 0, 0); + sqlite3VdbeAddOp(v, OP_Dup, 0, 0); + sqlite3VdbeAddOp(v, OP_Null, 0, 0); + sqlite3VdbeAddOp(v, OP_Insert, 0, OPFLAG_APPEND); + sqlite3VdbeAddOp(v, OP_Close, 0, 0); + sqlite3VdbeAddOp(v, OP_Pull, 1, 0); + } + + /* Normal (non-error) return. 
*/ + return; + + /* If an error occurs, we jump here */ +begin_table_error: + sqlite3_free(zName); + return; +} + +/* +** This macro is used to compare two strings in a case-insensitive manner. +** It is slightly faster than calling sqlite3StrICmp() directly, but +** produces larger code. +** +** WARNING: This macro is not compatible with the strcmp() family. It +** returns true if the two strings are equal, otherwise false. +*/ +#define STRICMP(x, y) (\ +sqlite3UpperToLower[*(unsigned char *)(x)]== \ +sqlite3UpperToLower[*(unsigned char *)(y)] \ +&& sqlite3StrICmp((x)+1,(y)+1)==0 ) + +/* +** Add a new column to the table currently being constructed. +** +** The parser calls this routine once for each column declaration +** in a CREATE TABLE statement. sqlite3StartTable() gets called +** first to get things going. Then this routine is called for each +** column. +*/ +void sqlite3AddColumn(Parse *pParse, Token *pName){ + Table *p; + int i; + char *z; + Column *pCol; + if( (p = pParse->pNewTable)==0 ) return; + if( p->nCol+1>SQLITE_MAX_COLUMN ){ + sqlite3ErrorMsg(pParse, "too many columns on %s", p->zName); + return; + } + z = sqlite3NameFromToken(pParse->db, pName); + if( z==0 ) return; + for(i=0; inCol; i++){ + if( STRICMP(z, p->aCol[i].zName) ){ + sqlite3ErrorMsg(pParse, "duplicate column name: %s", z); + sqlite3_free(z); + return; + } + } + if( (p->nCol & 0x7)==0 ){ + Column *aNew; + aNew = sqlite3DbRealloc(pParse->db,p->aCol,(p->nCol+8)*sizeof(p->aCol[0])); + if( aNew==0 ){ + sqlite3_free(z); + return; + } + p->aCol = aNew; + } + pCol = &p->aCol[p->nCol]; + memset(pCol, 0, sizeof(p->aCol[0])); + pCol->zName = z; + + /* If there is no type specified, columns have the default affinity + ** 'NONE'. If there is a type specified, then sqlite3AddColumnType() will + ** be called next to set pCol->affinity correctly. + */ + pCol->affinity = SQLITE_AFF_NONE; + p->nCol++; +} + +/* +** This routine is called by the parser while in the middle of +** parsing a CREATE TABLE statement. A "NOT NULL" constraint has +** been seen on a column. This routine sets the notNull flag on +** the column currently under construction. +*/ +void sqlite3AddNotNull(Parse *pParse, int onError){ + Table *p; + int i; + if( (p = pParse->pNewTable)==0 ) return; + i = p->nCol-1; + if( i>=0 ) p->aCol[i].notNull = onError; +} + +/* +** Scan the column type name zType (length nType) and return the +** associated affinity type. +** +** This routine does a case-independent search of zType for the +** substrings in the following table. If one of the substrings is +** found, the corresponding affinity is returned. If zType contains +** more than one of the substrings, entries toward the top of +** the table take priority. For example, if zType is 'BLOBINT', +** SQLITE_AFF_INTEGER is returned. +** +** Substring | Affinity +** -------------------------------- +** 'INT' | SQLITE_AFF_INTEGER +** 'CHAR' | SQLITE_AFF_TEXT +** 'CLOB' | SQLITE_AFF_TEXT +** 'TEXT' | SQLITE_AFF_TEXT +** 'BLOB' | SQLITE_AFF_NONE +** 'REAL' | SQLITE_AFF_REAL +** 'FLOA' | SQLITE_AFF_REAL +** 'DOUB' | SQLITE_AFF_REAL +** +** If none of the substrings in the above table are found, +** SQLITE_AFF_NUMERIC is returned. 
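The affinity table above can be verified from the SQL side. A public-API sketch (sqlite3.h, link with -lsqlite3): typeof() reports the storage class actually chosen for each value, which follows the declared-type rules documented here.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  int i;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
     "CREATE TABLE t(a INT, b VARCHAR(10), c BLOB, d REAL, e);"
     "INSERT INTO t VALUES('42','42','42','42','42');", 0, 0, 0);
  sqlite3_prepare_v2(db,
     "SELECT typeof(a), typeof(b), typeof(c), typeof(d), typeof(e) FROM t",
     -1, &pStmt, 0);
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    for(i=0; i<5; i++){
      /* Expected: integer, text, text, real, text */
      printf("%s\n", (const char*)sqlite3_column_text(pStmt, i));
    }
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}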
+*/ +char sqlite3AffinityType(const Token *pType){ + u32 h = 0; + char aff = SQLITE_AFF_NUMERIC; + const unsigned char *zIn = pType->z; + const unsigned char *zEnd = &pType->z[pType->n]; + + while( zIn!=zEnd ){ + h = (h<<8) + sqlite3UpperToLower[*zIn]; + zIn++; + if( h==(('c'<<24)+('h'<<16)+('a'<<8)+'r') ){ /* CHAR */ + aff = SQLITE_AFF_TEXT; + }else if( h==(('c'<<24)+('l'<<16)+('o'<<8)+'b') ){ /* CLOB */ + aff = SQLITE_AFF_TEXT; + }else if( h==(('t'<<24)+('e'<<16)+('x'<<8)+'t') ){ /* TEXT */ + aff = SQLITE_AFF_TEXT; + }else if( h==(('b'<<24)+('l'<<16)+('o'<<8)+'b') /* BLOB */ + && (aff==SQLITE_AFF_NUMERIC || aff==SQLITE_AFF_REAL) ){ + aff = SQLITE_AFF_NONE; +#ifndef SQLITE_OMIT_FLOATING_POINT + }else if( h==(('r'<<24)+('e'<<16)+('a'<<8)+'l') /* REAL */ + && aff==SQLITE_AFF_NUMERIC ){ + aff = SQLITE_AFF_REAL; + }else if( h==(('f'<<24)+('l'<<16)+('o'<<8)+'a') /* FLOA */ + && aff==SQLITE_AFF_NUMERIC ){ + aff = SQLITE_AFF_REAL; + }else if( h==(('d'<<24)+('o'<<16)+('u'<<8)+'b') /* DOUB */ + && aff==SQLITE_AFF_NUMERIC ){ + aff = SQLITE_AFF_REAL; +#endif + }else if( (h&0x00FFFFFF)==(('i'<<16)+('n'<<8)+'t') ){ /* INT */ + aff = SQLITE_AFF_INTEGER; + break; + } + } + + return aff; +} + +/* +** This routine is called by the parser while in the middle of +** parsing a CREATE TABLE statement. The pFirst token is the first +** token in the sequence of tokens that describe the type of the +** column currently under construction. pLast is the last token +** in the sequence. Use this information to construct a string +** that contains the typename of the column and store that string +** in zType. +*/ +void sqlite3AddColumnType(Parse *pParse, Token *pType){ + Table *p; + int i; + Column *pCol; + + if( (p = pParse->pNewTable)==0 ) return; + i = p->nCol-1; + if( i<0 ) return; + pCol = &p->aCol[i]; + sqlite3_free(pCol->zType); + pCol->zType = sqlite3NameFromToken(pParse->db, pType); + pCol->affinity = sqlite3AffinityType(pType); +} + +/* +** The expression is the default value for the most recently added column +** of the table currently under construction. +** +** Default value expressions must be constant. Raise an exception if this +** is not the case. +** +** This routine is called by the parser while in the middle of +** parsing a CREATE TABLE statement. +*/ +void sqlite3AddDefaultValue(Parse *pParse, Expr *pExpr){ + Table *p; + Column *pCol; + if( (p = pParse->pNewTable)!=0 ){ + pCol = &(p->aCol[p->nCol-1]); + if( !sqlite3ExprIsConstantOrFunction(pExpr) ){ + sqlite3ErrorMsg(pParse, "default value of column [%s] is not constant", + pCol->zName); + }else{ + Expr *pCopy; + sqlite3 *db = pParse->db; + sqlite3ExprDelete(pCol->pDflt); + pCol->pDflt = pCopy = sqlite3ExprDup(db, pExpr); + if( pCopy ){ + sqlite3TokenCopy(db, &pCopy->span, &pExpr->span); + } + } + } + sqlite3ExprDelete(pExpr); +} + +/* +** Designate the PRIMARY KEY for the table. pList is a list of names +** of columns that form the primary key. If pList is NULL, then the +** most recently added column of the table is the primary key. +** +** A table can have at most one primary key. If the table already has +** a primary key (and this is the second primary key) then create an +** error. +** +** If the PRIMARY KEY is on a single column whose datatype is INTEGER, +** then we will try to use that column as the rowid. Set the Table.iPKey +** field of the table under construction to be the index of the +** INTEGER PRIMARY KEY column. Table.iPKey is set to -1 if there is +** no INTEGER PRIMARY KEY. 
+** +** If the key is not an INTEGER PRIMARY KEY, then create a unique +** index for the key. No index is created for INTEGER PRIMARY KEYs. +*/ +void sqlite3AddPrimaryKey( + Parse *pParse, /* Parsing context */ + ExprList *pList, /* List of field names to be indexed */ + int onError, /* What to do with a uniqueness conflict */ + int autoInc, /* True if the AUTOINCREMENT keyword is present */ + int sortOrder /* SQLITE_SO_ASC or SQLITE_SO_DESC */ +){ + Table *pTab = pParse->pNewTable; + char *zType = 0; + int iCol = -1, i; + if( pTab==0 || IN_DECLARE_VTAB ) goto primary_key_exit; + if( pTab->hasPrimKey ){ + sqlite3ErrorMsg(pParse, + "table \"%s\" has more than one primary key", pTab->zName); + goto primary_key_exit; + } + pTab->hasPrimKey = 1; + if( pList==0 ){ + iCol = pTab->nCol - 1; + pTab->aCol[iCol].isPrimKey = 1; + }else{ + for(i=0; inExpr; i++){ + for(iCol=0; iColnCol; iCol++){ + if( sqlite3StrICmp(pList->a[i].zName, pTab->aCol[iCol].zName)==0 ){ + break; + } + } + if( iColnCol ){ + pTab->aCol[iCol].isPrimKey = 1; + } + } + if( pList->nExpr>1 ) iCol = -1; + } + if( iCol>=0 && iColnCol ){ + zType = pTab->aCol[iCol].zType; + } + if( zType && sqlite3StrICmp(zType, "INTEGER")==0 + && sortOrder==SQLITE_SO_ASC ){ + pTab->iPKey = iCol; + pTab->keyConf = onError; + pTab->autoInc = autoInc; + }else if( autoInc ){ +#ifndef SQLITE_OMIT_AUTOINCREMENT + sqlite3ErrorMsg(pParse, "AUTOINCREMENT is only allowed on an " + "INTEGER PRIMARY KEY"); +#endif + }else{ + sqlite3CreateIndex(pParse, 0, 0, 0, pList, onError, 0, 0, sortOrder, 0); + pList = 0; + } + +primary_key_exit: + sqlite3ExprListDelete(pList); + return; +} + +/* +** Add a new CHECK constraint to the table currently under construction. +*/ +void sqlite3AddCheckConstraint( + Parse *pParse, /* Parsing context */ + Expr *pCheckExpr /* The check expression */ +){ +#ifndef SQLITE_OMIT_CHECK + Table *pTab = pParse->pNewTable; + sqlite3 *db = pParse->db; + if( pTab && !IN_DECLARE_VTAB ){ + /* The CHECK expression must be duplicated so that tokens refer + ** to malloced space and not the (ephemeral) text of the CREATE TABLE + ** statement */ + pTab->pCheck = sqlite3ExprAnd(db, pTab->pCheck, + sqlite3ExprDup(db, pCheckExpr)); + } +#endif + sqlite3ExprDelete(pCheckExpr); +} + +/* +** Set the collation function of the most recently parsed table column +** to the CollSeq given. +*/ +void sqlite3AddCollateType(Parse *pParse, const char *zType, int nType){ + Table *p; + int i; + + if( (p = pParse->pNewTable)==0 ) return; + i = p->nCol-1; + + if( sqlite3LocateCollSeq(pParse, zType, nType) ){ + Index *pIdx; + p->aCol[i].zColl = sqlite3DbStrNDup(pParse->db, zType, nType); + + /* If the column is declared as " PRIMARY KEY COLLATE ", + ** then an index may have been created on this column before the + ** collation type was added. Correct this if it is the case. + */ + for(pIdx=p->pIndex; pIdx; pIdx=pIdx->pNext){ + assert( pIdx->nColumn==1 ); + if( pIdx->aiColumn[0]==i ){ + pIdx->azColl[0] = p->aCol[i].zColl; + } + } + } +} + +/* +** This function returns the collation sequence for database native text +** encoding identified by the string zName, length nName. +** +** If the requested collation sequence is not available, or not available +** in the database native encoding, the collation factory is invoked to +** request it. If the collation factory does not supply such a sequence, +** and the sequence is available in another text encoding, then that is +** returned instead. 
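As sqlite3AddPrimaryKey() above arranges, an INTEGER PRIMARY KEY becomes an alias for the rowid, while any other PRIMARY KEY is backed by an automatically created unique index. A public-API sketch (sqlite3.h, link with -lsqlite3) that makes the difference visible:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
     "CREATE TABLE a(id INTEGER PRIMARY KEY, v);"
     "CREATE TABLE b(id TEXT PRIMARY KEY, v);"
     "INSERT INTO a(v) VALUES('x');", 0, 0, 0);

  /* The INTEGER PRIMARY KEY of "a" is simply the rowid. */
  printf("rowid given to a: %lld\n",
         (long long)sqlite3_last_insert_rowid(db));

  /* Only "b" needs an automatic index for its PRIMARY KEY. */
  sqlite3_prepare_v2(db,
     "SELECT name, tbl_name FROM sqlite_master WHERE type='index'",
     -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("index %s on %s\n",
           (const char*)sqlite3_column_text(pStmt, 0),
           (const char*)sqlite3_column_text(pStmt, 1));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}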
+** +** If no versions of the requested collations sequence are available, or +** another error occurs, NULL is returned and an error message written into +** pParse. +** +** This routine is a wrapper around sqlite3FindCollSeq(). This routine +** invokes the collation factory if the named collation cannot be found +** and generates an error message. +*/ +CollSeq *sqlite3LocateCollSeq(Parse *pParse, const char *zName, int nName){ + sqlite3 *db = pParse->db; + u8 enc = ENC(db); + u8 initbusy = db->init.busy; + CollSeq *pColl; + + pColl = sqlite3FindCollSeq(db, enc, zName, nName, initbusy); + if( !initbusy && (!pColl || !pColl->xCmp) ){ + pColl = sqlite3GetCollSeq(db, pColl, zName, nName); + if( !pColl ){ + if( nName<0 ){ + nName = strlen(zName); + } + sqlite3ErrorMsg(pParse, "no such collation sequence: %.*s", nName, zName); + pColl = 0; + } + } + + return pColl; +} + + +/* +** Generate code that will increment the schema cookie. +** +** The schema cookie is used to determine when the schema for the +** database changes. After each schema change, the cookie value +** changes. When a process first reads the schema it records the +** cookie. Thereafter, whenever it goes to access the database, +** it checks the cookie to make sure the schema has not changed +** since it was last read. +** +** This plan is not completely bullet-proof. It is possible for +** the schema to change multiple times and for the cookie to be +** set back to prior value. But schema changes are infrequent +** and the probability of hitting the same cookie value is only +** 1 chance in 2^32. So we're safe enough. +*/ +void sqlite3ChangeCookie(sqlite3 *db, Vdbe *v, int iDb){ + sqlite3VdbeAddOp(v, OP_Integer, db->aDb[iDb].pSchema->schema_cookie+1, 0); + sqlite3VdbeAddOp(v, OP_SetCookie, iDb, 0); +} + +/* +** Measure the number of characters needed to output the given +** identifier. The number returned includes any quotes used +** but does not include the null terminator. +** +** The estimate is conservative. It might be larger that what is +** really needed. +*/ +static int identLength(const char *z){ + int n; + for(n=0; *z; n++, z++){ + if( *z=='"' ){ n++; } + } + return n + 2; +} + +/* +** Write an identifier onto the end of the given string. Add +** quote characters as needed. +*/ +static void identPut(char *z, int *pIdx, char *zSignedIdent){ + unsigned char *zIdent = (unsigned char*)zSignedIdent; + int i, j, needQuote; + i = *pIdx; + for(j=0; zIdent[j]; j++){ + if( !isalnum(zIdent[j]) && zIdent[j]!='_' ) break; + } + needQuote = zIdent[j]!=0 || isdigit(zIdent[0]) + || sqlite3KeywordCode(zIdent, j)!=TK_ID; + if( needQuote ) z[i++] = '"'; + for(j=0; zIdent[j]; j++){ + z[i++] = zIdent[j]; + if( zIdent[j]=='"' ) z[i++] = '"'; + } + if( needQuote ) z[i++] = '"'; + z[i] = 0; + *pIdx = i; +} + +/* +** Generate a CREATE TABLE statement appropriate for the given +** table. Memory to hold the text of the statement is obtained +** from sqliteMalloc() and must be freed by the calling function. 
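The quoting rule that identPut() above applies is easy to reproduce in isolation. The standalone sketch below uses an illustrative function name (not SQLite's) and, unlike the real routine, always quotes instead of first checking whether quoting is needed.

#include <stdio.h>

static void putQuotedIdent(char *zOut, const char *zIn){
  int i = 0;
  zOut[i++] = '"';
  while( *zIn ){
    zOut[i++] = *zIn;
    if( *zIn=='"' ) zOut[i++] = '"';   /* an embedded " is written as "" */
    zIn++;
  }
  zOut[i++] = '"';
  zOut[i] = 0;
}

int main(void){
  char zBuf[64];
  putQuotedIdent(zBuf, "odd \"name\"");
  printf("%s\n", zBuf);     /* prints: "odd ""name""" */
  return 0;
}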
+*/ +static char *createTableStmt(Table *p, int isTemp){ + int i, k, n; + char *zStmt; + char *zSep, *zSep2, *zEnd, *z; + Column *pCol; + n = 0; + for(pCol = p->aCol, i=0; inCol; i++, pCol++){ + n += identLength(pCol->zName); + z = pCol->zType; + if( z ){ + n += (strlen(z) + 1); + } + } + n += identLength(p->zName); + if( n<50 ){ + zSep = ""; + zSep2 = ","; + zEnd = ")"; + }else{ + zSep = "\n "; + zSep2 = ",\n "; + zEnd = "\n)"; + } + n += 35 + 6*p->nCol; + zStmt = sqlite3_malloc( n ); + if( zStmt==0 ) return 0; + sqlite3_snprintf(n, zStmt, + !OMIT_TEMPDB&&isTemp ? "CREATE TEMP TABLE ":"CREATE TABLE "); + k = strlen(zStmt); + identPut(zStmt, &k, p->zName); + zStmt[k++] = '('; + for(pCol=p->aCol, i=0; inCol; i++, pCol++){ + sqlite3_snprintf(n-k, &zStmt[k], zSep); + k += strlen(&zStmt[k]); + zSep = zSep2; + identPut(zStmt, &k, pCol->zName); + if( (z = pCol->zType)!=0 ){ + zStmt[k++] = ' '; + assert( strlen(z)+k+1<=n ); + sqlite3_snprintf(n-k, &zStmt[k], "%s", z); + k += strlen(z); + } + } + sqlite3_snprintf(n-k, &zStmt[k], "%s", zEnd); + return zStmt; +} + +/* +** This routine is called to report the final ")" that terminates +** a CREATE TABLE statement. +** +** The table structure that other action routines have been building +** is added to the internal hash tables, assuming no errors have +** occurred. +** +** An entry for the table is made in the master table on disk, unless +** this is a temporary table or db->init.busy==1. When db->init.busy==1 +** it means we are reading the sqlite_master table because we just +** connected to the database or because the sqlite_master table has +** recently changed, so the entry for this table already exists in +** the sqlite_master table. We do not want to create it again. +** +** If the pSelect argument is not NULL, it means that this routine +** was called to create a table generated from a +** "CREATE TABLE ... AS SELECT ..." statement. The column names of +** the new table will match the result set of the SELECT. +*/ +void sqlite3EndTable( + Parse *pParse, /* Parse context */ + Token *pCons, /* The ',' token after the last column defn. */ + Token *pEnd, /* The final ')' token in the CREATE TABLE */ + Select *pSelect /* Select from a "CREATE ... AS SELECT" */ +){ + Table *p; + sqlite3 *db = pParse->db; + int iDb; + + if( (pEnd==0 && pSelect==0) || pParse->nErr || db->mallocFailed ) { + return; + } + p = pParse->pNewTable; + if( p==0 ) return; + + assert( !db->init.busy || !pSelect ); + + iDb = sqlite3SchemaToIndex(db, p->pSchema); + +#ifndef SQLITE_OMIT_CHECK + /* Resolve names in all CHECK constraint expressions. + */ + if( p->pCheck ){ + SrcList sSrc; /* Fake SrcList for pParse->pNewTable */ + NameContext sNC; /* Name context for pParse->pNewTable */ + + memset(&sNC, 0, sizeof(sNC)); + memset(&sSrc, 0, sizeof(sSrc)); + sSrc.nSrc = 1; + sSrc.a[0].zName = p->zName; + sSrc.a[0].pTab = p; + sSrc.a[0].iCursor = -1; + sNC.pParse = pParse; + sNC.pSrcList = &sSrc; + sNC.isCheck = 1; + if( sqlite3ExprResolveNames(&sNC, p->pCheck) ){ + return; + } + } +#endif /* !defined(SQLITE_OMIT_CHECK) */ + + /* If the db->init.busy is 1 it means we are reading the SQL off the + ** "sqlite_master" or "sqlite_temp_master" table on the disk. + ** So do not write to the disk again. Extract the root page number + ** for the table from the db->init.newTnum field. (The page number + ** should have been put there by the sqliteOpenCb routine.) 
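Because a "CREATE TABLE ... AS SELECT" carries no explicit column list, createTableStmt() above synthesizes one, and that synthesized text is what lands in the sql column of sqlite_master. A public-API sketch (sqlite3.h, link with -lsqlite3); the exact text printed may vary between SQLite versions.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
     "CREATE TABLE src(a INTEGER, b TEXT);"
     "CREATE TABLE copy AS SELECT a, b FROM src;", 0, 0, 0);
  sqlite3_prepare_v2(db,
     "SELECT sql FROM sqlite_master WHERE name='copy'", -1, &pStmt, 0);
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    /* Typically something like: CREATE TABLE copy(a INTEGER,b TEXT) */
    printf("%s\n", (const char*)sqlite3_column_text(pStmt, 0));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}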
+ */ + if( db->init.busy ){ + p->tnum = db->init.newTnum; + } + + /* If not initializing, then create a record for the new table + ** in the SQLITE_MASTER table of the database. The record number + ** for the new table entry should already be on the stack. + ** + ** If this is a TEMPORARY table, write the entry into the auxiliary + ** file instead of into the main database file. + */ + if( !db->init.busy ){ + int n; + Vdbe *v; + char *zType; /* "view" or "table" */ + char *zType2; /* "VIEW" or "TABLE" */ + char *zStmt; /* Text of the CREATE TABLE or CREATE VIEW statement */ + + v = sqlite3GetVdbe(pParse); + if( v==0 ) return; + + sqlite3VdbeAddOp(v, OP_Close, 0, 0); + + /* Create the rootpage for the new table and push it onto the stack. + ** A view has no rootpage, so just push a zero onto the stack for + ** views. Initialize zType at the same time. + */ + if( p->pSelect==0 ){ + /* A regular table */ + zType = "table"; + zType2 = "TABLE"; +#ifndef SQLITE_OMIT_VIEW + }else{ + /* A view */ + zType = "view"; + zType2 = "VIEW"; +#endif + } + + /* If this is a CREATE TABLE xx AS SELECT ..., execute the SELECT + ** statement to populate the new table. The root-page number for the + ** new table is on the top of the vdbe stack. + ** + ** Once the SELECT has been coded by sqlite3Select(), it is in a + ** suitable state to query for the column names and types to be used + ** by the new table. + ** + ** A shared-cache write-lock is not required to write to the new table, + ** as a schema-lock must have already been obtained to create it. Since + ** a schema-lock excludes all other database users, the write-lock would + ** be redundant. + */ + if( pSelect ){ + Table *pSelTab; + sqlite3VdbeAddOp(v, OP_Dup, 0, 0); + sqlite3VdbeAddOp(v, OP_Integer, iDb, 0); + sqlite3VdbeAddOp(v, OP_OpenWrite, 1, 0); + pParse->nTab = 2; + sqlite3Select(pParse, pSelect, SRT_Table, 1, 0, 0, 0, 0); + sqlite3VdbeAddOp(v, OP_Close, 1, 0); + if( pParse->nErr==0 ){ + pSelTab = sqlite3ResultSetOfSelect(pParse, 0, pSelect); + if( pSelTab==0 ) return; + assert( p->aCol==0 ); + p->nCol = pSelTab->nCol; + p->aCol = pSelTab->aCol; + pSelTab->nCol = 0; + pSelTab->aCol = 0; + sqlite3DeleteTable(pSelTab); + } + } + + /* Compute the complete text of the CREATE statement */ + if( pSelect ){ + zStmt = createTableStmt(p, p->pSchema==db->aDb[1].pSchema); + }else{ + n = pEnd->z - pParse->sNameToken.z + 1; + zStmt = sqlite3MPrintf(db, + "CREATE %s %.*s", zType2, n, pParse->sNameToken.z + ); + } + + /* A slot for the record has already been allocated in the + ** SQLITE_MASTER table. We just need to update that slot with all + ** the information we've collected. The rowid for the preallocated + ** slot is the 2nd item on the stack. The top of the stack is the + ** root page for the new table (or a 0 if this is a view). + */ + sqlite3NestedParse(pParse, + "UPDATE %Q.%s " + "SET type='%s', name=%Q, tbl_name=%Q, rootpage=#0, sql=%Q " + "WHERE rowid=#1", + db->aDb[iDb].zName, SCHEMA_TABLE(iDb), + zType, + p->zName, + p->zName, + zStmt + ); + sqlite3_free(zStmt); + sqlite3ChangeCookie(db, v, iDb); + +#ifndef SQLITE_OMIT_AUTOINCREMENT + /* Check to see if we need to create an sqlite_sequence table for + ** keeping track of autoincrement keys. 
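The check coded just below creates the internal sqlite_sequence(name,seq) table the first time an AUTOINCREMENT table is declared, and subsequent INSERTs keep it updated. A public-API demo (sqlite3.h, link with -lsqlite3):

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
     "CREATE TABLE t(id INTEGER PRIMARY KEY AUTOINCREMENT, v);"
     "INSERT INTO t(v) VALUES('a');"
     "INSERT INTO t(v) VALUES('b');", 0, 0, 0);
  sqlite3_prepare_v2(db, "SELECT name, seq FROM sqlite_sequence",
                     -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    /* Expected: t 2  (largest AUTOINCREMENT key handed out so far) */
    printf("%s %s\n",
           (const char*)sqlite3_column_text(pStmt, 0),
           (const char*)sqlite3_column_text(pStmt, 1));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}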
+ */ + if( p->autoInc ){ + Db *pDb = &db->aDb[iDb]; + if( pDb->pSchema->pSeqTab==0 ){ + sqlite3NestedParse(pParse, + "CREATE TABLE %Q.sqlite_sequence(name,seq)", + pDb->zName + ); + } + } +#endif + + /* Reparse everything to update our internal data structures */ + sqlite3VdbeOp3(v, OP_ParseSchema, iDb, 0, + sqlite3MPrintf(db, "tbl_name='%q'",p->zName), P3_DYNAMIC); + } + + + /* Add the table to the in-memory representation of the database. + */ + if( db->init.busy && pParse->nErr==0 ){ + Table *pOld; + FKey *pFKey; + Schema *pSchema = p->pSchema; + pOld = sqlite3HashInsert(&pSchema->tblHash, p->zName, strlen(p->zName)+1,p); + if( pOld ){ + assert( p==pOld ); /* Malloc must have failed inside HashInsert() */ + db->mallocFailed = 1; + return; + } +#ifndef SQLITE_OMIT_FOREIGN_KEY + for(pFKey=p->pFKey; pFKey; pFKey=pFKey->pNextFrom){ + void *data; + int nTo = strlen(pFKey->zTo) + 1; + pFKey->pNextTo = sqlite3HashFind(&pSchema->aFKey, pFKey->zTo, nTo); + data = sqlite3HashInsert(&pSchema->aFKey, pFKey->zTo, nTo, pFKey); + if( data==(void *)pFKey ){ + db->mallocFailed = 1; + } + } +#endif + pParse->pNewTable = 0; + db->nTable++; + db->flags |= SQLITE_InternChanges; + +#ifndef SQLITE_OMIT_ALTERTABLE + if( !p->pSelect ){ + const char *zName = (const char *)pParse->sNameToken.z; + int nName; + assert( !pSelect && pCons && pEnd ); + if( pCons->z==0 ){ + pCons = pEnd; + } + nName = (const char *)pCons->z - zName; + p->addColOffset = 13 + sqlite3Utf8CharLen(zName, nName); + } +#endif + } +} + +#ifndef SQLITE_OMIT_VIEW +/* +** The parser calls this routine in order to create a new VIEW +*/ +void sqlite3CreateView( + Parse *pParse, /* The parsing context */ + Token *pBegin, /* The CREATE token that begins the statement */ + Token *pName1, /* The token that holds the name of the view */ + Token *pName2, /* The token that holds the name of the view */ + Select *pSelect, /* A SELECT statement that will become the new view */ + int isTemp, /* TRUE for a TEMPORARY view */ + int noErr /* Suppress error messages if VIEW already exists */ +){ + Table *p; + int n; + const unsigned char *z; + Token sEnd; + DbFixer sFix; + Token *pName; + int iDb; + sqlite3 *db = pParse->db; + + if( pParse->nVar>0 ){ + sqlite3ErrorMsg(pParse, "parameters are not allowed in views"); + sqlite3SelectDelete(pSelect); + return; + } + sqlite3StartTable(pParse, pName1, pName2, isTemp, 1, 0, noErr); + p = pParse->pNewTable; + if( p==0 || pParse->nErr ){ + sqlite3SelectDelete(pSelect); + return; + } + sqlite3TwoPartName(pParse, pName1, pName2, &pName); + iDb = sqlite3SchemaToIndex(db, p->pSchema); + if( sqlite3FixInit(&sFix, pParse, iDb, "view", pName) + && sqlite3FixSelect(&sFix, pSelect) + ){ + sqlite3SelectDelete(pSelect); + return; + } + + /* Make a copy of the entire SELECT statement that defines the view. + ** This will force all the Expr.token.z values to be dynamically + ** allocated rather than point to the input string - which means that + ** they will persist after the current sqlite3_exec() call returns. + */ + p->pSelect = sqlite3SelectDup(db, pSelect); + sqlite3SelectDelete(pSelect); + if( db->mallocFailed ){ + return; + } + if( !db->init.busy ){ + sqlite3ViewGetColumnNames(pParse, p); + } + + /* Locate the end of the CREATE VIEW statement. Make sEnd point to + ** the end. 
+ */ + sEnd = pParse->sLastToken; + if( sEnd.z[0]!=0 && sEnd.z[0]!=';' ){ + sEnd.z += sEnd.n; + } + sEnd.n = 0; + n = sEnd.z - pBegin->z; + z = (const unsigned char*)pBegin->z; + while( n>0 && (z[n-1]==';' || isspace(z[n-1])) ){ n--; } + sEnd.z = &z[n-1]; + sEnd.n = 1; + + /* Use sqlite3EndTable() to add the view to the SQLITE_MASTER table */ + sqlite3EndTable(pParse, 0, &sEnd, 0); + return; +} +#endif /* SQLITE_OMIT_VIEW */ + +#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) +/* +** The Table structure pTable is really a VIEW. Fill in the names of +** the columns of the view in the pTable structure. Return the number +** of errors. If an error is seen leave an error message in pParse->zErrMsg. +*/ +int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){ + Table *pSelTab; /* A fake table from which we get the result set */ + Select *pSel; /* Copy of the SELECT that implements the view */ + int nErr = 0; /* Number of errors encountered */ + int n; /* Temporarily holds the number of cursors assigned */ + sqlite3 *db = pParse->db; /* Database connection for malloc errors */ + + assert( pTable ); + +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( sqlite3VtabCallConnect(pParse, pTable) ){ + return SQLITE_ERROR; + } + if( IsVirtual(pTable) ) return 0; +#endif + +#ifndef SQLITE_OMIT_VIEW + /* A positive nCol means the columns names for this view are + ** already known. + */ + if( pTable->nCol>0 ) return 0; + + /* A negative nCol is a special marker meaning that we are currently + ** trying to compute the column names. If we enter this routine with + ** a negative nCol, it means two or more views form a loop, like this: + ** + ** CREATE VIEW one AS SELECT * FROM two; + ** CREATE VIEW two AS SELECT * FROM one; + ** + ** Actually, this error is caught previously and so the following test + ** should always fail. But we will leave it in place just to be safe. + */ + if( pTable->nCol<0 ){ + sqlite3ErrorMsg(pParse, "view %s is circularly defined", pTable->zName); + return 1; + } + assert( pTable->nCol>=0 ); + + /* If we get this far, it means we need to compute the table names. + ** Note that the call to sqlite3ResultSetOfSelect() will expand any + ** "*" elements in the results set of the view and will assign cursors + ** to the elements of the FROM clause. But we do not want these changes + ** to be permanent. So the computation is done on a copy of the SELECT + ** statement that defines the view. + */ + assert( pTable->pSelect ); + pSel = sqlite3SelectDup(db, pTable->pSelect); + if( pSel ){ + n = pParse->nTab; + sqlite3SrcListAssignCursors(pParse, pSel->pSrc); + pTable->nCol = -1; + pSelTab = sqlite3ResultSetOfSelect(pParse, 0, pSel); + pParse->nTab = n; + if( pSelTab ){ + assert( pTable->aCol==0 ); + pTable->nCol = pSelTab->nCol; + pTable->aCol = pSelTab->aCol; + pSelTab->nCol = 0; + pSelTab->aCol = 0; + sqlite3DeleteTable(pSelTab); + pTable->pSchema->flags |= DB_UnresetViews; + }else{ + pTable->nCol = 0; + nErr++; + } + sqlite3SelectDelete(pSel); + } else { + nErr++; + } +#endif /* SQLITE_OMIT_VIEW */ + return nErr; +} +#endif /* !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) */ + +#ifndef SQLITE_OMIT_VIEW +/* +** Clear the column names from every VIEW in database idx. 
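The effect of sqlite3ViewGetColumnNames() above shows through the public API: the column names of a statement prepared against a view come from the view's defining SELECT. Sketch (sqlite3.h, link with -lsqlite3):

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  int i;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
     "CREATE TABLE t(a, b, c);"
     "CREATE VIEW v AS SELECT a AS x, b+c AS y FROM t;", 0, 0, 0);
  sqlite3_prepare_v2(db, "SELECT * FROM v", -1, &pStmt, 0);
  for(i=0; i<sqlite3_column_count(pStmt); i++){
    printf("column %d: %s\n", i, sqlite3_column_name(pStmt, i));  /* x then y */
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}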
+*/ +static void sqliteViewResetAll(sqlite3 *db, int idx){ + HashElem *i; + if( !DbHasProperty(db, idx, DB_UnresetViews) ) return; + for(i=sqliteHashFirst(&db->aDb[idx].pSchema->tblHash); i;i=sqliteHashNext(i)){ + Table *pTab = sqliteHashData(i); + if( pTab->pSelect ){ + sqliteResetColumnNames(pTab); + } + } + DbClearProperty(db, idx, DB_UnresetViews); +} +#else +# define sqliteViewResetAll(A,B) +#endif /* SQLITE_OMIT_VIEW */ + +/* +** This function is called by the VDBE to adjust the internal schema +** used by SQLite when the btree layer moves a table root page. The +** root-page of a table or index in database iDb has changed from iFrom +** to iTo. +** +** Ticket #1728: The symbol table might still contain information +** on tables and/or indices that are the process of being deleted. +** If you are unlucky, one of those deleted indices or tables might +** have the same rootpage number as the real table or index that is +** being moved. So we cannot stop searching after the first match +** because the first match might be for one of the deleted indices +** or tables and not the table/index that is actually being moved. +** We must continue looping until all tables and indices with +** rootpage==iFrom have been converted to have a rootpage of iTo +** in order to be certain that we got the right one. +*/ +#ifndef SQLITE_OMIT_AUTOVACUUM +void sqlite3RootPageMoved(Db *pDb, int iFrom, int iTo){ + HashElem *pElem; + Hash *pHash; + + pHash = &pDb->pSchema->tblHash; + for(pElem=sqliteHashFirst(pHash); pElem; pElem=sqliteHashNext(pElem)){ + Table *pTab = sqliteHashData(pElem); + if( pTab->tnum==iFrom ){ + pTab->tnum = iTo; + } + } + pHash = &pDb->pSchema->idxHash; + for(pElem=sqliteHashFirst(pHash); pElem; pElem=sqliteHashNext(pElem)){ + Index *pIdx = sqliteHashData(pElem); + if( pIdx->tnum==iFrom ){ + pIdx->tnum = iTo; + } + } +} +#endif + +/* +** Write code to erase the table with root-page iTable from database iDb. +** Also write code to modify the sqlite_master table and internal schema +** if a root-page of another table is moved by the btree-layer whilst +** erasing iTable (this can happen with an auto-vacuum database). +*/ +static void destroyRootPage(Parse *pParse, int iTable, int iDb){ + Vdbe *v = sqlite3GetVdbe(pParse); + sqlite3VdbeAddOp(v, OP_Destroy, iTable, iDb); +#ifndef SQLITE_OMIT_AUTOVACUUM + /* OP_Destroy pushes an integer onto the stack. If this integer + ** is non-zero, then it is the root page number of a table moved to + ** location iTable. The following code modifies the sqlite_master table to + ** reflect this. + ** + ** The "#0" in the SQL is a special constant that means whatever value + ** is on the top of the stack. See sqlite3RegisterExpr(). + */ + sqlite3NestedParse(pParse, + "UPDATE %Q.%s SET rootpage=%d WHERE #0 AND rootpage=#0", + pParse->db->aDb[iDb].zName, SCHEMA_TABLE(iDb), iTable); +#endif +} + +/* +** Write VDBE code to erase table pTab and all associated indices on disk. +** Code to update the sqlite_master tables and internal schema definitions +** in case a root-page belonging to another table is moved by the btree layer +** is also added (this can happen with an auto-vacuum database). 
+*/ +static void destroyTable(Parse *pParse, Table *pTab){ +#ifdef SQLITE_OMIT_AUTOVACUUM + Index *pIdx; + int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); + destroyRootPage(pParse, pTab->tnum, iDb); + for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + destroyRootPage(pParse, pIdx->tnum, iDb); + } +#else + /* If the database may be auto-vacuum capable (if SQLITE_OMIT_AUTOVACUUM + ** is not defined), then it is important to call OP_Destroy on the + ** table and index root-pages in order, starting with the numerically + ** largest root-page number. This guarantees that none of the root-pages + ** to be destroyed is relocated by an earlier OP_Destroy. i.e. if the + ** following were coded: + ** + ** OP_Destroy 4 0 + ** ... + ** OP_Destroy 5 0 + ** + ** and root page 5 happened to be the largest root-page number in the + ** database, then root page 5 would be moved to page 4 by the + ** "OP_Destroy 4 0" opcode. The subsequent "OP_Destroy 5 0" would hit + ** a free-list page. + */ + int iTab = pTab->tnum; + int iDestroyed = 0; + + while( 1 ){ + Index *pIdx; + int iLargest = 0; + + if( iDestroyed==0 || iTabpIndex; pIdx; pIdx=pIdx->pNext){ + int iIdx = pIdx->tnum; + assert( pIdx->pSchema==pTab->pSchema ); + if( (iDestroyed==0 || (iIdxiLargest ){ + iLargest = iIdx; + } + } + if( iLargest==0 ){ + return; + }else{ + int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); + destroyRootPage(pParse, iLargest, iDb); + iDestroyed = iLargest; + } + } +#endif +} + +/* +** This routine is called to do the work of a DROP TABLE statement. +** pName is the name of the table to be dropped. +*/ +void sqlite3DropTable(Parse *pParse, SrcList *pName, int isView, int noErr){ + Table *pTab; + Vdbe *v; + sqlite3 *db = pParse->db; + int iDb; + + if( pParse->nErr || db->mallocFailed ){ + goto exit_drop_table; + } + assert( pName->nSrc==1 ); + pTab = sqlite3LocateTable(pParse, pName->a[0].zName, pName->a[0].zDatabase); + + if( pTab==0 ){ + if( noErr ){ + sqlite3ErrorClear(pParse); + } + goto exit_drop_table; + } + iDb = sqlite3SchemaToIndex(db, pTab->pSchema); + assert( iDb>=0 && iDbnDb ); +#ifndef SQLITE_OMIT_AUTHORIZATION + { + int code; + const char *zTab = SCHEMA_TABLE(iDb); + const char *zDb = db->aDb[iDb].zName; + const char *zArg2 = 0; + if( sqlite3AuthCheck(pParse, SQLITE_DELETE, zTab, 0, zDb)){ + goto exit_drop_table; + } + if( isView ){ + if( !OMIT_TEMPDB && iDb==1 ){ + code = SQLITE_DROP_TEMP_VIEW; + }else{ + code = SQLITE_DROP_VIEW; + } +#ifndef SQLITE_OMIT_VIRTUALTABLE + }else if( IsVirtual(pTab) ){ + if( sqlite3ViewGetColumnNames(pParse, pTab) ){ + goto exit_drop_table; + } + code = SQLITE_DROP_VTABLE; + zArg2 = pTab->pMod->zName; +#endif + }else{ + if( !OMIT_TEMPDB && iDb==1 ){ + code = SQLITE_DROP_TEMP_TABLE; + }else{ + code = SQLITE_DROP_TABLE; + } + } + if( sqlite3AuthCheck(pParse, code, pTab->zName, zArg2, zDb) ){ + goto exit_drop_table; + } + if( sqlite3AuthCheck(pParse, SQLITE_DELETE, pTab->zName, 0, zDb) ){ + goto exit_drop_table; + } + } +#endif + if( pTab->readOnly || pTab==db->aDb[iDb].pSchema->pSeqTab ){ + sqlite3ErrorMsg(pParse, "table %s may not be dropped", pTab->zName); + goto exit_drop_table; + } + +#ifndef SQLITE_OMIT_VIEW + /* Ensure DROP TABLE is not used on a view, and DROP VIEW is not used + ** on a table. 
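The check coded just below in sqlite3DropTable() is what produces the "use DROP VIEW to delete view ..." message. A public-API demo (sqlite3.h, link with -lsqlite3):

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
     "CREATE TABLE t(a);"
     "CREATE VIEW v AS SELECT a FROM t;", 0, 0, 0);
  if( sqlite3_exec(db, "DROP TABLE v", 0, 0, &zErr)!=SQLITE_OK ){
    printf("error: %s\n", zErr);    /* "use DROP VIEW to delete view v" */
    sqlite3_free(zErr);
  }
  sqlite3_close(db);
  return 0;
}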
+ */ + if( isView && pTab->pSelect==0 ){ + sqlite3ErrorMsg(pParse, "use DROP TABLE to delete table %s", pTab->zName); + goto exit_drop_table; + } + if( !isView && pTab->pSelect ){ + sqlite3ErrorMsg(pParse, "use DROP VIEW to delete view %s", pTab->zName); + goto exit_drop_table; + } +#endif + + /* Generate code to remove the table from the master table + ** on disk. + */ + v = sqlite3GetVdbe(pParse); + if( v ){ + Trigger *pTrigger; + Db *pDb = &db->aDb[iDb]; + sqlite3BeginWriteOperation(pParse, 0, iDb); + +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( IsVirtual(pTab) ){ + Vdbe *v = sqlite3GetVdbe(pParse); + if( v ){ + sqlite3VdbeAddOp(v, OP_VBegin, 0, 0); + } + } +#endif + + /* Drop all triggers associated with the table being dropped. Code + ** is generated to remove entries from sqlite_master and/or + ** sqlite_temp_master if required. + */ + pTrigger = pTab->pTrigger; + while( pTrigger ){ + assert( pTrigger->pSchema==pTab->pSchema || + pTrigger->pSchema==db->aDb[1].pSchema ); + sqlite3DropTriggerPtr(pParse, pTrigger); + pTrigger = pTrigger->pNext; + } + +#ifndef SQLITE_OMIT_AUTOINCREMENT + /* Remove any entries of the sqlite_sequence table associated with + ** the table being dropped. This is done before the table is dropped + ** at the btree level, in case the sqlite_sequence table needs to + ** move as a result of the drop (can happen in auto-vacuum mode). + */ + if( pTab->autoInc ){ + sqlite3NestedParse(pParse, + "DELETE FROM %s.sqlite_sequence WHERE name=%Q", + pDb->zName, pTab->zName + ); + } +#endif + + /* Drop all SQLITE_MASTER table and index entries that refer to the + ** table. The program name loops through the master table and deletes + ** every row that refers to a table of the same name as the one being + ** dropped. Triggers are handled seperately because a trigger can be + ** created in the temp database that refers to a table in another + ** database. + */ + sqlite3NestedParse(pParse, + "DELETE FROM %Q.%s WHERE tbl_name=%Q and type!='trigger'", + pDb->zName, SCHEMA_TABLE(iDb), pTab->zName); + if( !isView && !IsVirtual(pTab) ){ + destroyTable(pParse, pTab); + } + + /* Remove the table entry from SQLite's internal schema and modify + ** the schema cookie. + */ + if( IsVirtual(pTab) ){ + sqlite3VdbeOp3(v, OP_VDestroy, iDb, 0, pTab->zName, 0); + } + sqlite3VdbeOp3(v, OP_DropTable, iDb, 0, pTab->zName, 0); + sqlite3ChangeCookie(db, v, iDb); + } + sqliteViewResetAll(db, iDb); + +exit_drop_table: + sqlite3SrcListDelete(pName); +} + +/* +** This routine is called to create a new foreign key on the table +** currently under construction. pFromCol determines which columns +** in the current table point to the foreign key. If pFromCol==0 then +** connect the key to the last column inserted. pTo is the name of +** the table referred to. pToCol is a list of tables in the other +** pTo table that the foreign key points to. flags contains all +** information about the conflict resolution algorithms specified +** in the ON DELETE, ON UPDATE and ON INSERT clauses. +** +** An FKey structure is created and added to the table currently +** under construction in the pParse->pNewTable field. The new FKey +** is not linked into db->aFKey at this point - that does not happen +** until sqlite3EndTable(). +** +** The foreign key is set for IMMEDIATE processing. A subsequent call +** to sqlite3DeferForeignKey() might change this to DEFERRED. 
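The routine below records a REFERENCES clause, including its ON DELETE/ON UPDATE conflict actions and its DEFERRABLE setting, in an FKey structure attached to the new table. A public-API demo (sqlite3.h, link with -lsqlite3); note that SQLite of this vintage parses and stores foreign keys but does not enforce them at run time.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  int rc;
  sqlite3_open(":memory:", &db);
  rc = sqlite3_exec(db,
     "CREATE TABLE parent(id INTEGER PRIMARY KEY);"
     "CREATE TABLE child(pid INTEGER REFERENCES parent(id)"
     " ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED);", 0, 0, &zErr);
  printf("create: %s\n", rc==SQLITE_OK ? "ok" : zErr);
  /* With no enforcement, the orphan row below is accepted. */
  rc = sqlite3_exec(db, "INSERT INTO child VALUES(99)", 0, 0, &zErr);
  printf("insert: %s\n", rc==SQLITE_OK ? "ok" : zErr);
  sqlite3_close(db);
  return 0;
}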
+*/ +void sqlite3CreateForeignKey( + Parse *pParse, /* Parsing context */ + ExprList *pFromCol, /* Columns in this table that point to other table */ + Token *pTo, /* Name of the other table */ + ExprList *pToCol, /* Columns in the other table */ + int flags /* Conflict resolution algorithms. */ +){ +#ifndef SQLITE_OMIT_FOREIGN_KEY + FKey *pFKey = 0; + Table *p = pParse->pNewTable; + int nByte; + int i; + int nCol; + char *z; + + assert( pTo!=0 ); + if( p==0 || pParse->nErr || IN_DECLARE_VTAB ) goto fk_end; + if( pFromCol==0 ){ + int iCol = p->nCol-1; + if( iCol<0 ) goto fk_end; + if( pToCol && pToCol->nExpr!=1 ){ + sqlite3ErrorMsg(pParse, "foreign key on %s" + " should reference only one column of table %T", + p->aCol[iCol].zName, pTo); + goto fk_end; + } + nCol = 1; + }else if( pToCol && pToCol->nExpr!=pFromCol->nExpr ){ + sqlite3ErrorMsg(pParse, + "number of columns in foreign key does not match the number of " + "columns in the referenced table"); + goto fk_end; + }else{ + nCol = pFromCol->nExpr; + } + nByte = sizeof(*pFKey) + nCol*sizeof(pFKey->aCol[0]) + pTo->n + 1; + if( pToCol ){ + for(i=0; inExpr; i++){ + nByte += strlen(pToCol->a[i].zName) + 1; + } + } + pFKey = sqlite3DbMallocZero(pParse->db, nByte ); + if( pFKey==0 ){ + goto fk_end; + } + pFKey->pFrom = p; + pFKey->pNextFrom = p->pFKey; + z = (char*)&pFKey[1]; + pFKey->aCol = (struct sColMap*)z; + z += sizeof(struct sColMap)*nCol; + pFKey->zTo = z; + memcpy(z, pTo->z, pTo->n); + z[pTo->n] = 0; + z += pTo->n+1; + pFKey->pNextTo = 0; + pFKey->nCol = nCol; + if( pFromCol==0 ){ + pFKey->aCol[0].iFrom = p->nCol-1; + }else{ + for(i=0; inCol; j++){ + if( sqlite3StrICmp(p->aCol[j].zName, pFromCol->a[i].zName)==0 ){ + pFKey->aCol[i].iFrom = j; + break; + } + } + if( j>=p->nCol ){ + sqlite3ErrorMsg(pParse, + "unknown column \"%s\" in foreign key definition", + pFromCol->a[i].zName); + goto fk_end; + } + } + } + if( pToCol ){ + for(i=0; ia[i].zName); + pFKey->aCol[i].zCol = z; + memcpy(z, pToCol->a[i].zName, n); + z[n] = 0; + z += n+1; + } + } + pFKey->isDeferred = 0; + pFKey->deleteConf = flags & 0xff; + pFKey->updateConf = (flags >> 8 ) & 0xff; + pFKey->insertConf = (flags >> 16 ) & 0xff; + + /* Link the foreign key to the table as the last step. + */ + p->pFKey = pFKey; + pFKey = 0; + +fk_end: + sqlite3_free(pFKey); +#endif /* !defined(SQLITE_OMIT_FOREIGN_KEY) */ + sqlite3ExprListDelete(pFromCol); + sqlite3ExprListDelete(pToCol); +} + +/* +** This routine is called when an INITIALLY IMMEDIATE or INITIALLY DEFERRED +** clause is seen as part of a foreign key definition. The isDeferred +** parameter is 1 for INITIALLY DEFERRED and 0 for INITIALLY IMMEDIATE. +** The behavior of the most recently created foreign key is adjusted +** accordingly. +*/ +void sqlite3DeferForeignKey(Parse *pParse, int isDeferred){ +#ifndef SQLITE_OMIT_FOREIGN_KEY + Table *pTab; + FKey *pFKey; + if( (pTab = pParse->pNewTable)==0 || (pFKey = pTab->pFKey)==0 ) return; + pFKey->isDeferred = isDeferred; +#endif +} + +/* +** Generate code that will erase and refill index *pIdx. This is +** used to initialize a newly created index or to recompute the +** content of an index in response to a REINDEX command. +** +** if memRootPage is not negative, it means that the index is newly +** created. The memory cell specified by memRootPage contains the +** root page number of the index. If memRootPage is negative, then +** the index already exists and must be cleared before being refilled and +** the root page number of the index is taken from pIndex->tnum. 
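sqlite3RefillIndex() below also populates a brand-new index from the rows already in the table; for a UNIQUE index it halts with SQLITE_CONSTRAINT when it meets a duplicate key. Public-API demo (sqlite3.h, link with -lsqlite3); the message shown in the comment is the text used by this version of the source.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
     "CREATE TABLE t(a);"
     "INSERT INTO t VALUES(1);"
     "INSERT INTO t VALUES(1);", 0, 0, 0);
  if( sqlite3_exec(db, "CREATE UNIQUE INDEX i1 ON t(a)", 0, 0, &zErr)!=SQLITE_OK ){
    printf("error: %s\n", zErr);   /* "indexed columns are not unique" */
    sqlite3_free(zErr);
  }
  sqlite3_close(db);
  return 0;
}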
+*/ +static void sqlite3RefillIndex(Parse *pParse, Index *pIndex, int memRootPage){ + Table *pTab = pIndex->pTable; /* The table that is indexed */ + int iTab = pParse->nTab; /* Btree cursor used for pTab */ + int iIdx = pParse->nTab+1; /* Btree cursor used for pIndex */ + int addr1; /* Address of top of loop */ + int tnum; /* Root page of index */ + Vdbe *v; /* Generate code into this virtual machine */ + KeyInfo *pKey; /* KeyInfo for index */ + sqlite3 *db = pParse->db; /* The database connection */ + int iDb = sqlite3SchemaToIndex(db, pIndex->pSchema); + +#ifndef SQLITE_OMIT_AUTHORIZATION + if( sqlite3AuthCheck(pParse, SQLITE_REINDEX, pIndex->zName, 0, + db->aDb[iDb].zName ) ){ + return; + } +#endif + + /* Require a write-lock on the table to perform this operation */ + sqlite3TableLock(pParse, iDb, pTab->tnum, 1, pTab->zName); + + v = sqlite3GetVdbe(pParse); + if( v==0 ) return; + if( memRootPage>=0 ){ + sqlite3VdbeAddOp(v, OP_MemLoad, memRootPage, 0); + tnum = 0; + }else{ + tnum = pIndex->tnum; + sqlite3VdbeAddOp(v, OP_Clear, tnum, iDb); + } + sqlite3VdbeAddOp(v, OP_Integer, iDb, 0); + pKey = sqlite3IndexKeyinfo(pParse, pIndex); + sqlite3VdbeOp3(v, OP_OpenWrite, iIdx, tnum, (char *)pKey, P3_KEYINFO_HANDOFF); + sqlite3OpenTable(pParse, iTab, iDb, pTab, OP_OpenRead); + addr1 = sqlite3VdbeAddOp(v, OP_Rewind, iTab, 0); + sqlite3GenerateIndexKey(v, pIndex, iTab); + if( pIndex->onError!=OE_None ){ + int curaddr = sqlite3VdbeCurrentAddr(v); + int addr2 = curaddr+4; + sqlite3VdbeChangeP2(v, curaddr-1, addr2); + sqlite3VdbeAddOp(v, OP_Rowid, iTab, 0); + sqlite3VdbeAddOp(v, OP_AddImm, 1, 0); + sqlite3VdbeAddOp(v, OP_IsUnique, iIdx, addr2); + sqlite3VdbeOp3(v, OP_Halt, SQLITE_CONSTRAINT, OE_Abort, + "indexed columns are not unique", P3_STATIC); + assert( db->mallocFailed || addr2==sqlite3VdbeCurrentAddr(v) ); + } + sqlite3VdbeAddOp(v, OP_IdxInsert, iIdx, 0); + sqlite3VdbeAddOp(v, OP_Next, iTab, addr1+1); + sqlite3VdbeJumpHere(v, addr1); + sqlite3VdbeAddOp(v, OP_Close, iTab, 0); + sqlite3VdbeAddOp(v, OP_Close, iIdx, 0); +} + +/* +** Create a new index for an SQL table. pName1.pName2 is the name of the index +** and pTblList is the name of the table that is to be indexed. Both will +** be NULL for a primary key or an index that is created to satisfy a +** UNIQUE constraint. If pTable and pIndex are NULL, use pParse->pNewTable +** as the table to be indexed. pParse->pNewTable is a table that is +** currently being constructed by a CREATE TABLE statement. +** +** pList is a list of columns to be indexed. pList will be NULL if this +** is a primary key or unique-constraint on the most recent column added +** to the table currently under construction. +*/ +void sqlite3CreateIndex( + Parse *pParse, /* All information about this parse */ + Token *pName1, /* First part of index name. May be NULL */ + Token *pName2, /* Second part of index name. May be NULL */ + SrcList *pTblName, /* Table to index. 
Use pParse->pNewTable if 0 */ + ExprList *pList, /* A list of columns to be indexed */ + int onError, /* OE_Abort, OE_Ignore, OE_Replace, or OE_None */ + Token *pStart, /* The CREATE token that begins this statement */ + Token *pEnd, /* The ")" that closes the CREATE INDEX statement */ + int sortOrder, /* Sort order of primary key when pList==NULL */ + int ifNotExist /* Omit error if index already exists */ +){ + Table *pTab = 0; /* Table to be indexed */ + Index *pIndex = 0; /* The index to be created */ + char *zName = 0; /* Name of the index */ + int nName; /* Number of characters in zName */ + int i, j; + Token nullId; /* Fake token for an empty ID list */ + DbFixer sFix; /* For assigning database names to pTable */ + int sortOrderMask; /* 1 to honor DESC in index. 0 to ignore. */ + sqlite3 *db = pParse->db; + Db *pDb; /* The specific table containing the indexed database */ + int iDb; /* Index of the database that is being written */ + Token *pName = 0; /* Unqualified name of the index to create */ + struct ExprList_item *pListItem; /* For looping over pList */ + int nCol; + int nExtra = 0; + char *zExtra; + + if( pParse->nErr || db->mallocFailed || IN_DECLARE_VTAB ){ + goto exit_create_index; + } + + /* + ** Find the table that is to be indexed. Return early if not found. + */ + if( pTblName!=0 ){ + + /* Use the two-part index name to determine the database + ** to search for the table. 'Fix' the table name to this db + ** before looking up the table. + */ + assert( pName1 && pName2 ); + iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pName); + if( iDb<0 ) goto exit_create_index; + +#ifndef SQLITE_OMIT_TEMPDB + /* If the index name was unqualified, check if the the table + ** is a temp table. If so, set the database to 1. + */ + pTab = sqlite3SrcListLookup(pParse, pTblName); + if( pName2 && pName2->n==0 && pTab && pTab->pSchema==db->aDb[1].pSchema ){ + iDb = 1; + } +#endif + + if( sqlite3FixInit(&sFix, pParse, iDb, "index", pName) && + sqlite3FixSrcList(&sFix, pTblName) + ){ + /* Because the parser constructs pTblName from a single identifier, + ** sqlite3FixSrcList can never fail. */ + assert(0); + } + pTab = sqlite3LocateTable(pParse, pTblName->a[0].zName, + pTblName->a[0].zDatabase); + if( !pTab ) goto exit_create_index; + assert( db->aDb[iDb].pSchema==pTab->pSchema ); + }else{ + assert( pName==0 ); + pTab = pParse->pNewTable; + if( !pTab ) goto exit_create_index; + iDb = sqlite3SchemaToIndex(db, pTab->pSchema); + } + pDb = &db->aDb[iDb]; + + if( pTab==0 || pParse->nErr ) goto exit_create_index; + if( pTab->readOnly ){ + sqlite3ErrorMsg(pParse, "table %s may not be indexed", pTab->zName); + goto exit_create_index; + } +#ifndef SQLITE_OMIT_VIEW + if( pTab->pSelect ){ + sqlite3ErrorMsg(pParse, "views may not be indexed"); + goto exit_create_index; + } +#endif +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( IsVirtual(pTab) ){ + sqlite3ErrorMsg(pParse, "virtual tables may not be indexed"); + goto exit_create_index; + } +#endif + + /* + ** Find the name of the index. Make sure there is not already another + ** index or table with the same name. + ** + ** Exception: If we are reading the names of permanent indices from the + ** sqlite_master table (because some other process changed the schema) and + ** one of the index names collides with the name of a temporary table or + ** index, then we will continue to process this index. + ** + ** If pName==0 it means that we are + ** dealing with a primary key or UNIQUE constraint. We have to invent our + ** own name. 
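+  **
+  ** For example (an illustrative table), the two constraints in
+  **
+  **     CREATE TABLE t1(a TEXT UNIQUE, b TEXT, UNIQUE(b));
+  **
+  ** give rise to automatic indices named "sqlite_autoindex_t1_1" and
+  ** "sqlite_autoindex_t1_2"; the counter appended below is one more than
+  ** the number of indices already attached to the table.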
+ */ + if( pName ){ + zName = sqlite3NameFromToken(db, pName); + if( SQLITE_OK!=sqlite3ReadSchema(pParse) ) goto exit_create_index; + if( zName==0 ) goto exit_create_index; + if( SQLITE_OK!=sqlite3CheckObjectName(pParse, zName) ){ + goto exit_create_index; + } + if( !db->init.busy ){ + if( SQLITE_OK!=sqlite3ReadSchema(pParse) ) goto exit_create_index; + if( sqlite3FindTable(db, zName, 0)!=0 ){ + sqlite3ErrorMsg(pParse, "there is already a table named %s", zName); + goto exit_create_index; + } + } + if( sqlite3FindIndex(db, zName, pDb->zName)!=0 ){ + if( !ifNotExist ){ + sqlite3ErrorMsg(pParse, "index %s already exists", zName); + } + goto exit_create_index; + } + }else{ + char zBuf[30]; + int n; + Index *pLoop; + for(pLoop=pTab->pIndex, n=1; pLoop; pLoop=pLoop->pNext, n++){} + sqlite3_snprintf(sizeof(zBuf),zBuf,"_%d",n); + zName = 0; + sqlite3SetString(&zName, "sqlite_autoindex_", pTab->zName, zBuf, (char*)0); + if( zName==0 ){ + db->mallocFailed = 1; + goto exit_create_index; + } + } + + /* Check for authorization to create an index. + */ +#ifndef SQLITE_OMIT_AUTHORIZATION + { + const char *zDb = pDb->zName; + if( sqlite3AuthCheck(pParse, SQLITE_INSERT, SCHEMA_TABLE(iDb), 0, zDb) ){ + goto exit_create_index; + } + i = SQLITE_CREATE_INDEX; + if( !OMIT_TEMPDB && iDb==1 ) i = SQLITE_CREATE_TEMP_INDEX; + if( sqlite3AuthCheck(pParse, i, zName, pTab->zName, zDb) ){ + goto exit_create_index; + } + } +#endif + + /* If pList==0, it means this routine was called to make a primary + ** key out of the last column added to the table under construction. + ** So create a fake list to simulate this. + */ + if( pList==0 ){ + nullId.z = (u8*)pTab->aCol[pTab->nCol-1].zName; + nullId.n = strlen((char*)nullId.z); + pList = sqlite3ExprListAppend(pParse, 0, 0, &nullId); + if( pList==0 ) goto exit_create_index; + pList->a[0].sortOrder = sortOrder; + } + + /* Figure out how many bytes of space are required to store explicitly + ** specified collation sequence names. + */ + for(i=0; inExpr; i++){ + Expr *pExpr = pList->a[i].pExpr; + if( pExpr ){ + nExtra += (1 + strlen(pExpr->pColl->zName)); + } + } + + /* + ** Allocate the index structure. + */ + nName = strlen(zName); + nCol = pList->nExpr; + pIndex = sqlite3DbMallocZero(db, + sizeof(Index) + /* Index structure */ + sizeof(int)*nCol + /* Index.aiColumn */ + sizeof(int)*(nCol+1) + /* Index.aiRowEst */ + sizeof(char *)*nCol + /* Index.azColl */ + sizeof(u8)*nCol + /* Index.aSortOrder */ + nName + 1 + /* Index.zName */ + nExtra /* Collation sequence names */ + ); + if( db->mallocFailed ){ + goto exit_create_index; + } + pIndex->azColl = (char**)(&pIndex[1]); + pIndex->aiColumn = (int *)(&pIndex->azColl[nCol]); + pIndex->aiRowEst = (unsigned *)(&pIndex->aiColumn[nCol]); + pIndex->aSortOrder = (u8 *)(&pIndex->aiRowEst[nCol+1]); + pIndex->zName = (char *)(&pIndex->aSortOrder[nCol]); + zExtra = (char *)(&pIndex->zName[nName+1]); + memcpy(pIndex->zName, zName, nName+1); + pIndex->pTable = pTab; + pIndex->nColumn = pList->nExpr; + pIndex->onError = onError; + pIndex->autoIndex = pName==0; + pIndex->pSchema = db->aDb[iDb].pSchema; + + /* Check to see if we should honor DESC requests on index columns + */ + if( pDb->pSchema->file_format>=4 ){ + sortOrderMask = -1; /* Honor DESC */ + }else{ + sortOrderMask = 0; /* Ignore DESC */ + } + + /* Scan the names of the columns of the table to be indexed and + ** load the column indices into the Index structure. Report an error + ** if any column is not found. 
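+  **
+  ** For example (an illustrative statement), in
+  **
+  **     CREATE INDEX i1 ON t1(a COLLATE NOCASE DESC, b);
+  **
+  ** the first pList entry carries an Expr naming the NOCASE collation and a
+  ** descending sort order, while the second falls back to the column's
+  ** declared collation (or the default BINARY) in ascending order.  The
+  ** DESC request is only honored when sortOrderMask above allows it, i.e.
+  ** when the file format is 4 or greater.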
+  */
+  for(i=0, pListItem=pList->a; i<pList->nExpr; i++, pListItem++){
+    const char *zColName = pListItem->zName;
+    Column *pTabCol;
+    int requestedSortOrder;
+    char *zColl;                   /* Collation sequence name */
+
+    for(j=0, pTabCol=pTab->aCol; j<pTab->nCol; j++, pTabCol++){
+      if( sqlite3StrICmp(zColName, pTabCol->zName)==0 ) break;
+    }
+    if( j>=pTab->nCol ){
+      sqlite3ErrorMsg(pParse, "table %s has no column named %s",
+        pTab->zName, zColName);
+      goto exit_create_index;
+    }
+    /* TODO:  Add a test to make sure that the same column is not named
+    ** more than once within the same index.  Only the first instance of
+    ** the column will ever be used by the optimizer.  Note that using the
+    ** same column more than once cannot be an error because that would
+    ** break backwards compatibility - it needs to be a warning.
+    */
+    pIndex->aiColumn[i] = j;
+    if( pListItem->pExpr ){
+      assert( pListItem->pExpr->pColl );
+      zColl = zExtra;
+      sqlite3_snprintf(nExtra, zExtra, "%s", pListItem->pExpr->pColl->zName);
+      zExtra += (strlen(zColl) + 1);
+    }else{
+      zColl = pTab->aCol[j].zColl;
+      if( !zColl ){
+        zColl = db->pDfltColl->zName;
+      }
+    }
+    if( !db->init.busy && !sqlite3LocateCollSeq(pParse, zColl, -1) ){
+      goto exit_create_index;
+    }
+    pIndex->azColl[i] = zColl;
+    requestedSortOrder = pListItem->sortOrder & sortOrderMask;
+    pIndex->aSortOrder[i] = requestedSortOrder;
+  }
+  sqlite3DefaultRowEst(pIndex);
+
+  if( pTab==pParse->pNewTable ){
+    /* This routine has been called to create an automatic index as a
+    ** result of a PRIMARY KEY or UNIQUE clause on a column definition, or
+    ** a PRIMARY KEY or UNIQUE clause following the column definitions.
+    ** i.e. one of:
+    **
+    ** CREATE TABLE t(x PRIMARY KEY, y);
+    ** CREATE TABLE t(x, y, UNIQUE(x, y));
+    **
+    ** Either way, check to see if the table already has such an index. If
+    ** so, don't bother creating this one. This only applies to
+    ** automatically created indices. Users can do as they wish with
+    ** explicit indices.
+    */
+    Index *pIdx;
+    for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+      int k;
+      assert( pIdx->onError!=OE_None );
+      assert( pIdx->autoIndex );
+      assert( pIndex->onError!=OE_None );
+
+      if( pIdx->nColumn!=pIndex->nColumn ) continue;
+      for(k=0; k<pIdx->nColumn; k++){
+        const char *z1 = pIdx->azColl[k];
+        const char *z2 = pIndex->azColl[k];
+        if( pIdx->aiColumn[k]!=pIndex->aiColumn[k] ) break;
+        if( pIdx->aSortOrder[k]!=pIndex->aSortOrder[k] ) break;
+        if( z1!=z2 && sqlite3StrICmp(z1, z2) ) break;
+      }
+      if( k==pIdx->nColumn ){
+        if( pIdx->onError!=pIndex->onError ){
+          /* This constraint creates the same index as a previous
+          ** constraint specified somewhere in the CREATE TABLE statement.
+          ** However the ON CONFLICT clauses are different. If both this
+          ** constraint and the previous equivalent constraint have explicit
+          ** ON CONFLICT clauses this is an error. Otherwise, use the
+          ** explicitly specified behaviour for the index.
+          */
+          if( !(pIdx->onError==OE_Default || pIndex->onError==OE_Default) ){
+            sqlite3ErrorMsg(pParse,
+                "conflicting ON CONFLICT clauses specified", 0);
+          }
+          if( pIdx->onError==OE_Default ){
+            pIdx->onError = pIndex->onError;
+          }
+        }
+        goto exit_create_index;
+      }
+    }
+  }
+
+  /* Link the new Index structure to its table and to the other
+  ** in-memory database structures.
+ */ + if( db->init.busy ){ + Index *p; + p = sqlite3HashInsert(&pIndex->pSchema->idxHash, + pIndex->zName, strlen(pIndex->zName)+1, pIndex); + if( p ){ + assert( p==pIndex ); /* Malloc must have failed */ + db->mallocFailed = 1; + goto exit_create_index; + } + db->flags |= SQLITE_InternChanges; + if( pTblName!=0 ){ + pIndex->tnum = db->init.newTnum; + } + } + + /* If the db->init.busy is 0 then create the index on disk. This + ** involves writing the index into the master table and filling in the + ** index with the current table contents. + ** + ** The db->init.busy is 0 when the user first enters a CREATE INDEX + ** command. db->init.busy is 1 when a database is opened and + ** CREATE INDEX statements are read out of the master table. In + ** the latter case the index already exists on disk, which is why + ** we don't want to recreate it. + ** + ** If pTblName==0 it means this index is generated as a primary key + ** or UNIQUE constraint of a CREATE TABLE statement. Since the table + ** has just been created, it contains no data and the index initialization + ** step can be skipped. + */ + else if( db->init.busy==0 ){ + Vdbe *v; + char *zStmt; + int iMem = pParse->nMem++; + + v = sqlite3GetVdbe(pParse); + if( v==0 ) goto exit_create_index; + + + /* Create the rootpage for the index + */ + sqlite3BeginWriteOperation(pParse, 1, iDb); + sqlite3VdbeAddOp(v, OP_CreateIndex, iDb, 0); + sqlite3VdbeAddOp(v, OP_MemStore, iMem, 0); + + /* Gather the complete text of the CREATE INDEX statement into + ** the zStmt variable + */ + if( pStart && pEnd ){ + /* A named index with an explicit CREATE INDEX statement */ + zStmt = sqlite3MPrintf(db, "CREATE%s INDEX %.*s", + onError==OE_None ? "" : " UNIQUE", + pEnd->z - pName->z + 1, + pName->z); + }else{ + /* An automatic index created by a PRIMARY KEY or UNIQUE constraint */ + /* zStmt = sqlite3MPrintf(""); */ + zStmt = 0; + } + + /* Add an entry in sqlite_master for this index + */ + sqlite3NestedParse(pParse, + "INSERT INTO %Q.%s VALUES('index',%Q,%Q,#0,%Q);", + db->aDb[iDb].zName, SCHEMA_TABLE(iDb), + pIndex->zName, + pTab->zName, + zStmt + ); + sqlite3VdbeAddOp(v, OP_Pop, 1, 0); + sqlite3_free(zStmt); + + /* Fill the index with data and reparse the schema. Code an OP_Expire + ** to invalidate all pre-compiled statements. + */ + if( pTblName ){ + sqlite3RefillIndex(pParse, pIndex, iMem); + sqlite3ChangeCookie(db, v, iDb); + sqlite3VdbeOp3(v, OP_ParseSchema, iDb, 0, + sqlite3MPrintf(db, "name='%q'", pIndex->zName), P3_DYNAMIC); + sqlite3VdbeAddOp(v, OP_Expire, 0, 0); + } + } + + /* When adding an index to the list of indices for a table, make + ** sure all indices labeled OE_Replace come after all those labeled + ** OE_Ignore. This is necessary for the correct operation of UPDATE + ** and INSERT. + */ + if( db->init.busy || pTblName==0 ){ + if( onError!=OE_Replace || pTab->pIndex==0 + || pTab->pIndex->onError==OE_Replace){ + pIndex->pNext = pTab->pIndex; + pTab->pIndex = pIndex; + }else{ + Index *pOther = pTab->pIndex; + while( pOther->pNext && pOther->pNext->onError!=OE_Replace ){ + pOther = pOther->pNext; + } + pIndex->pNext = pOther->pNext; + pOther->pNext = pIndex; + } + pIndex = 0; + } + + /* Clean up before exiting */ +exit_create_index: + if( pIndex ){ + freeIndex(pIndex); + } + sqlite3ExprListDelete(pList); + sqlite3SrcListDelete(pTblName); + sqlite3_free(zName); + return; +} + +/* +** Generate code to make sure the file format number is at least minFormat. +** The generated code will increase the file format number if necessary. 
+*/ +void sqlite3MinimumFileFormat(Parse *pParse, int iDb, int minFormat){ + Vdbe *v; + v = sqlite3GetVdbe(pParse); + if( v ){ + sqlite3VdbeAddOp(v, OP_ReadCookie, iDb, 1); + sqlite3VdbeUsesBtree(v, iDb); + sqlite3VdbeAddOp(v, OP_Integer, minFormat, 0); + sqlite3VdbeAddOp(v, OP_Ge, 0, sqlite3VdbeCurrentAddr(v)+3); + sqlite3VdbeAddOp(v, OP_Integer, minFormat, 0); + sqlite3VdbeAddOp(v, OP_SetCookie, iDb, 1); + } +} + +/* +** Fill the Index.aiRowEst[] array with default information - information +** to be used when we have not run the ANALYZE command. +** +** aiRowEst[0] is suppose to contain the number of elements in the index. +** Since we do not know, guess 1 million. aiRowEst[1] is an estimate of the +** number of rows in the table that match any particular value of the +** first column of the index. aiRowEst[2] is an estimate of the number +** of rows that match any particular combiniation of the first 2 columns +** of the index. And so forth. It must always be the case that +* +** aiRowEst[N]<=aiRowEst[N-1] +** aiRowEst[N]>=1 +** +** Apart from that, we have little to go on besides intuition as to +** how aiRowEst[] should be initialized. The numbers generated here +** are based on typical values found in actual indices. +*/ +void sqlite3DefaultRowEst(Index *pIdx){ + unsigned *a = pIdx->aiRowEst; + int i; + assert( a!=0 ); + a[0] = 1000000; + for(i=pIdx->nColumn; i>=5; i--){ + a[i] = 5; + } + while( i>=1 ){ + a[i] = 11 - i; + i--; + } + if( pIdx->onError!=OE_None ){ + a[pIdx->nColumn] = 1; + } +} + +/* +** This routine will drop an existing named index. This routine +** implements the DROP INDEX statement. +*/ +void sqlite3DropIndex(Parse *pParse, SrcList *pName, int ifExists){ + Index *pIndex; + Vdbe *v; + sqlite3 *db = pParse->db; + int iDb; + + if( pParse->nErr || db->mallocFailed ){ + goto exit_drop_index; + } + assert( pName->nSrc==1 ); + if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ + goto exit_drop_index; + } + pIndex = sqlite3FindIndex(db, pName->a[0].zName, pName->a[0].zDatabase); + if( pIndex==0 ){ + if( !ifExists ){ + sqlite3ErrorMsg(pParse, "no such index: %S", pName, 0); + } + pParse->checkSchema = 1; + goto exit_drop_index; + } + if( pIndex->autoIndex ){ + sqlite3ErrorMsg(pParse, "index associated with UNIQUE " + "or PRIMARY KEY constraint cannot be dropped", 0); + goto exit_drop_index; + } + iDb = sqlite3SchemaToIndex(db, pIndex->pSchema); +#ifndef SQLITE_OMIT_AUTHORIZATION + { + int code = SQLITE_DROP_INDEX; + Table *pTab = pIndex->pTable; + const char *zDb = db->aDb[iDb].zName; + const char *zTab = SCHEMA_TABLE(iDb); + if( sqlite3AuthCheck(pParse, SQLITE_DELETE, zTab, 0, zDb) ){ + goto exit_drop_index; + } + if( !OMIT_TEMPDB && iDb ) code = SQLITE_DROP_TEMP_INDEX; + if( sqlite3AuthCheck(pParse, code, pIndex->zName, pTab->zName, zDb) ){ + goto exit_drop_index; + } + } +#endif + + /* Generate code to remove the index and from the master table */ + v = sqlite3GetVdbe(pParse); + if( v ){ + sqlite3NestedParse(pParse, + "DELETE FROM %Q.%s WHERE name=%Q", + db->aDb[iDb].zName, SCHEMA_TABLE(iDb), + pIndex->zName + ); + sqlite3ChangeCookie(db, v, iDb); + destroyRootPage(pParse, pIndex->tnum, iDb); + sqlite3VdbeOp3(v, OP_DropIndex, iDb, 0, pIndex->zName, 0); + } + +exit_drop_index: + sqlite3SrcListDelete(pName); +} + +/* +** pArray is a pointer to an array of objects. Each object in the +** array is szEntry bytes in size. This routine allocates a new +** object on the end of the array. +** +** *pnEntry is the number of entries already in use. 
*pnAlloc is the previously allocated size of the array.  initSize is the
+** suggested initial array size allocation.
+**
+** The index of the new entry is returned in *pIdx.
+**
+** This routine returns a pointer to the array of objects.  This
+** might be the same as the pArray parameter or it might be a different
+** pointer if the array was resized.
+*/
+void *sqlite3ArrayAllocate(
+  sqlite3 *db,      /* Connection to notify of malloc failures */
+  void *pArray,     /* Array of objects.  Might be reallocated */
+  int szEntry,      /* Size of each object in the array */
+  int initSize,     /* Suggested initial allocation, in elements */
+  int *pnEntry,     /* Number of objects currently in use */
+  int *pnAlloc,     /* Current size of the allocation, in elements */
+  int *pIdx         /* Write the index of a new slot here */
+){
+  char *z;
+  if( *pnEntry >= *pnAlloc ){
+    void *pNew;
+    int newSize;
+    newSize = (*pnAlloc)*2 + initSize;
+    pNew = sqlite3DbRealloc(db, pArray, newSize*szEntry);
+    if( pNew==0 ){
+      *pIdx = -1;
+      return pArray;
+    }
+    *pnAlloc = newSize;
+    pArray = pNew;
+  }
+  z = (char*)pArray;
+  memset(&z[*pnEntry * szEntry], 0, szEntry);
+  *pIdx = *pnEntry;
+  ++*pnEntry;
+  return pArray;
+}
+
+/*
+** Append a new element to the given IdList.  Create a new IdList if
+** need be.
+**
+** A new IdList is returned, or NULL if malloc() fails.
+*/
+IdList *sqlite3IdListAppend(sqlite3 *db, IdList *pList, Token *pToken){
+  int i;
+  if( pList==0 ){
+    pList = sqlite3DbMallocZero(db, sizeof(IdList) );
+    if( pList==0 ) return 0;
+    pList->nAlloc = 0;
+  }
+  pList->a = sqlite3ArrayAllocate(
+      db,
+      pList->a,
+      sizeof(pList->a[0]),
+      5,
+      &pList->nId,
+      &pList->nAlloc,
+      &i
+  );
+  if( i<0 ){
+    sqlite3IdListDelete(pList);
+    return 0;
+  }
+  pList->a[i].zName = sqlite3NameFromToken(db, pToken);
+  return pList;
+}
+
+/*
+** Delete an IdList.
+*/
+void sqlite3IdListDelete(IdList *pList){
+  int i;
+  if( pList==0 ) return;
+  for(i=0; i<pList->nId; i++){
+    sqlite3_free(pList->a[i].zName);
+  }
+  sqlite3_free(pList->a);
+  sqlite3_free(pList);
+}
+
+/*
+** Return the index in pList of the identifier named zId.  Return -1
+** if not found.
+*/
+int sqlite3IdListIndex(IdList *pList, const char *zName){
+  int i;
+  if( pList==0 ) return -1;
+  for(i=0; i<pList->nId; i++){
+    if( sqlite3StrICmp(pList->a[i].zName, zName)==0 ) return i;
+  }
+  return -1;
+}
+
+/*
+** Append a new table name to the given SrcList.  Create a new SrcList if
+** need be.  A new entry is created in the SrcList even if pToken is NULL.
+**
+** A new SrcList is returned, or NULL if malloc() fails.
+**
+** If pDatabase is not null, it means that the table has an optional
+** database name prefix.  Like this:  "database.table".  The pDatabase
+** points to the table name and the pTable points to the database name.
+** The SrcList.a[].zName field is filled with the table name which might
+** come from pTable (if pDatabase is NULL) or from pDatabase.
+** SrcList.a[].zDatabase is filled with the database name from pTable,
+** or with NULL if no database is specified.
+**
+** In other words, if called like this:
+**
+**         sqlite3SrcListAppend(D,A,B,0);
+**
+** Then B is a table name and the database name is unspecified.  If called
+** like this:
+**
+**         sqlite3SrcListAppend(D,A,B,C);
+**
+** Then C is the table name and B is the database name.
+*/
+SrcList *sqlite3SrcListAppend(
+  sqlite3 *db,        /* Connection to notify of malloc failures */
+  SrcList *pList,     /* Append to this SrcList.  NULL creates a new SrcList */
+  Token *pTable,      /* Table to append */
+  Token *pDatabase    /* Database of the table */
+){
+  struct SrcList_item *pItem;
+  if( pList==0 ){
+    pList = sqlite3DbMallocZero(db, sizeof(SrcList) );
+    if( pList==0 ) return 0;
+    pList->nAlloc = 1;
+  }
+  if( pList->nSrc>=pList->nAlloc ){
+    SrcList *pNew;
+    pList->nAlloc *= 2;
+    pNew = sqlite3DbRealloc(db, pList,
+               sizeof(*pList) + (pList->nAlloc-1)*sizeof(pList->a[0]) );
+    if( pNew==0 ){
+      sqlite3SrcListDelete(pList);
+      return 0;
+    }
+    pList = pNew;
+  }
+  pItem = &pList->a[pList->nSrc];
+  memset(pItem, 0, sizeof(pList->a[0]));
+  if( pDatabase && pDatabase->z==0 ){
+    pDatabase = 0;
+  }
+  if( pDatabase && pTable ){
+    Token *pTemp = pDatabase;
+    pDatabase = pTable;
+    pTable = pTemp;
+  }
+  pItem->zName = sqlite3NameFromToken(db, pTable);
+  pItem->zDatabase = sqlite3NameFromToken(db, pDatabase);
+  pItem->iCursor = -1;
+  pItem->isPopulated = 0;
+  pList->nSrc++;
+  return pList;
+}
+
+/*
+** Assign cursors to all tables in a SrcList
+*/
+void sqlite3SrcListAssignCursors(Parse *pParse, SrcList *pList){
+  int i;
+  struct SrcList_item *pItem;
+  assert(pList || pParse->db->mallocFailed );
+  if( pList ){
+    for(i=0, pItem=pList->a; i<pList->nSrc; i++, pItem++){
+      if( pItem->iCursor>=0 ) break;
+      pItem->iCursor = pParse->nTab++;
+      if( pItem->pSelect ){
+        sqlite3SrcListAssignCursors(pParse, pItem->pSelect->pSrc);
+      }
+    }
+  }
+}
+
+/*
+** Delete an entire SrcList including all its substructure.
+*/
+void sqlite3SrcListDelete(SrcList *pList){
+  int i;
+  struct SrcList_item *pItem;
+  if( pList==0 ) return;
+  for(pItem=pList->a, i=0; i<pList->nSrc; i++, pItem++){
+    sqlite3_free(pItem->zDatabase);
+    sqlite3_free(pItem->zName);
+    sqlite3_free(pItem->zAlias);
+    sqlite3DeleteTable(pItem->pTab);
+    sqlite3SelectDelete(pItem->pSelect);
+    sqlite3ExprDelete(pItem->pOn);
+    sqlite3IdListDelete(pItem->pUsing);
+  }
+  sqlite3_free(pList);
+}
+
+/*
+** This routine is called by the parser to add a new term to the
+** end of a growing FROM clause.  The "p" parameter is the part of
+** the FROM clause that has already been constructed.  "p" is NULL
+** if this is the first term of the FROM clause.  pTable and pDatabase
+** are the name of the table and database named in the FROM clause term.
+** pDatabase is NULL if the database name qualifier is missing - the
+** usual case.  If the term has an alias, then pAlias points to the
+** alias token.  If the term is a subquery, then pSubquery is the
+** SELECT statement that the subquery encodes.  The pTable and
+** pDatabase parameters are NULL for subqueries.  The pOn and pUsing
+** parameters are the content of the ON and USING clauses.
+**
+** Return a new SrcList which encodes the FROM clause with the new
+** term added.
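+**
+** For example (an illustrative query), while parsing
+**
+**     SELECT * FROM main.t1 AS x LEFT JOIN t2 USING(a)
+**
+** this routine runs once per FROM term: the first call leaves an entry with
+** zDatabase "main", zName "t1" and zAlias "x"; the second leaves an entry
+** for "t2" whose pUsing list holds the single identifier "a".  The LEFT
+** JOIN operator is attached to the first term at this point and is moved
+** onto the second term by sqlite3SrcListShiftJoinType() below.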
+*/ +SrcList *sqlite3SrcListAppendFromTerm( + Parse *pParse, /* Parsing context */ + SrcList *p, /* The left part of the FROM clause already seen */ + Token *pTable, /* Name of the table to add to the FROM clause */ + Token *pDatabase, /* Name of the database containing pTable */ + Token *pAlias, /* The right-hand side of the AS subexpression */ + Select *pSubquery, /* A subquery used in place of a table name */ + Expr *pOn, /* The ON clause of a join */ + IdList *pUsing /* The USING clause of a join */ +){ + struct SrcList_item *pItem; + sqlite3 *db = pParse->db; + p = sqlite3SrcListAppend(db, p, pTable, pDatabase); + if( p==0 || p->nSrc==0 ){ + sqlite3ExprDelete(pOn); + sqlite3IdListDelete(pUsing); + sqlite3SelectDelete(pSubquery); + return p; + } + pItem = &p->a[p->nSrc-1]; + if( pAlias && pAlias->n ){ + pItem->zAlias = sqlite3NameFromToken(db, pAlias); + } + pItem->pSelect = pSubquery; + pItem->pOn = pOn; + pItem->pUsing = pUsing; + return p; +} + +/* +** When building up a FROM clause in the parser, the join operator +** is initially attached to the left operand. But the code generator +** expects the join operator to be on the right operand. This routine +** Shifts all join operators from left to right for an entire FROM +** clause. +** +** Example: Suppose the join is like this: +** +** A natural cross join B +** +** The operator is "natural cross join". The A and B operands are stored +** in p->a[0] and p->a[1], respectively. The parser initially stores the +** operator with A. This routine shifts that operator over to B. +*/ +void sqlite3SrcListShiftJoinType(SrcList *p){ + if( p && p->a ){ + int i; + for(i=p->nSrc-1; i>0; i--){ + p->a[i].jointype = p->a[i-1].jointype; + } + p->a[0].jointype = 0; + } +} + +/* +** Begin a transaction +*/ +void sqlite3BeginTransaction(Parse *pParse, int type){ + sqlite3 *db; + Vdbe *v; + int i; + + if( pParse==0 || (db=pParse->db)==0 || db->aDb[0].pBt==0 ) return; + if( pParse->nErr || db->mallocFailed ) return; + if( sqlite3AuthCheck(pParse, SQLITE_TRANSACTION, "BEGIN", 0, 0) ) return; + + v = sqlite3GetVdbe(pParse); + if( !v ) return; + if( type!=TK_DEFERRED ){ + for(i=0; inDb; i++){ + sqlite3VdbeAddOp(v, OP_Transaction, i, (type==TK_EXCLUSIVE)+1); + sqlite3VdbeUsesBtree(v, i); + } + } + sqlite3VdbeAddOp(v, OP_AutoCommit, 0, 0); +} + +/* +** Commit a transaction +*/ +void sqlite3CommitTransaction(Parse *pParse){ + sqlite3 *db; + Vdbe *v; + + if( pParse==0 || (db=pParse->db)==0 || db->aDb[0].pBt==0 ) return; + if( pParse->nErr || db->mallocFailed ) return; + if( sqlite3AuthCheck(pParse, SQLITE_TRANSACTION, "COMMIT", 0, 0) ) return; + + v = sqlite3GetVdbe(pParse); + if( v ){ + sqlite3VdbeAddOp(v, OP_AutoCommit, 1, 0); + } +} + +/* +** Rollback a transaction +*/ +void sqlite3RollbackTransaction(Parse *pParse){ + sqlite3 *db; + Vdbe *v; + + if( pParse==0 || (db=pParse->db)==0 || db->aDb[0].pBt==0 ) return; + if( pParse->nErr || db->mallocFailed ) return; + if( sqlite3AuthCheck(pParse, SQLITE_TRANSACTION, "ROLLBACK", 0, 0) ) return; + + v = sqlite3GetVdbe(pParse); + if( v ){ + sqlite3VdbeAddOp(v, OP_AutoCommit, 1, 1); + } +} + +/* +** Make sure the TEMP database is open and available for use. Return +** the number of errors. Leave any error messages in the pParse structure. 
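+**
+** The temporary database is created lazily: nothing is opened until the
+** first statement that actually needs it.  For example (illustrative SQL),
+**
+**     CREATE TEMP TABLE scratch(x);    -- temp database file opened here
+**     CREATE TABLE persistent(y);      -- never touches the temp database
+**
+** and the open is skipped entirely while a statement is only being
+** EXPLAINed.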
+*/
+int sqlite3OpenTempDatabase(Parse *pParse){
+  sqlite3 *db = pParse->db;
+  if( db->aDb[1].pBt==0 && !pParse->explain ){
+    int rc;
+    static const int flags =
+          SQLITE_OPEN_READWRITE |
+          SQLITE_OPEN_CREATE |
+          SQLITE_OPEN_EXCLUSIVE |
+          SQLITE_OPEN_DELETEONCLOSE |
+          SQLITE_OPEN_TEMP_DB;
+
+    rc = sqlite3BtreeFactory(db, 0, 0, SQLITE_DEFAULT_CACHE_SIZE, flags,
+                             &db->aDb[1].pBt);
+    if( rc!=SQLITE_OK ){
+      sqlite3ErrorMsg(pParse, "unable to open a temporary database "
+        "file for storing temporary tables");
+      pParse->rc = rc;
+      return 1;
+    }
+    if( db->flags & !db->autoCommit ){
+      rc = sqlite3BtreeBeginTrans(db->aDb[1].pBt, 1);
+      if( rc!=SQLITE_OK ){
+        sqlite3ErrorMsg(pParse, "unable to get a write lock on "
+          "the temporary database file");
+        pParse->rc = rc;
+        return 1;
+      }
+    }
+    assert( db->aDb[1].pSchema );
+  }
+  return 0;
+}
+
+/*
+** Generate VDBE code that will verify the schema cookie and start
+** a read-transaction for all named database files.
+**
+** It is important that all schema cookies be verified and all
+** read transactions be started before anything else happens in
+** the VDBE program.  But this routine can be called after much other
+** code has been generated.  So here is what we do:
+**
+** The first time this routine is called, we code an OP_Goto that
+** will jump to a subroutine at the end of the program.  Then we
+** record every database that needs its schema verified in the
+** pParse->cookieMask field.  Later, after all other code has been
+** generated, the subroutine that does the cookie verifications and
+** starts the transactions will be coded and the OP_Goto P2 value
+** will be made to point to that subroutine.  The generation of the
+** cookie verification subroutine code happens in sqlite3FinishCoding().
+**
+** If iDb<0 then code the OP_Goto only - don't set flag to verify the
+** schema on any databases.  This can be used to position the OP_Goto
+** early in the code, before we know if any database tables will be used.
+*/
+void sqlite3CodeVerifySchema(Parse *pParse, int iDb){
+  sqlite3 *db;
+  Vdbe *v;
+  int mask;
+
+  v = sqlite3GetVdbe(pParse);
+  if( v==0 ) return;  /* This only happens if there was a prior error */
+  db = pParse->db;
+  if( pParse->cookieGoto==0 ){
+    pParse->cookieGoto = sqlite3VdbeAddOp(v, OP_Goto, 0, 0)+1;
+  }
+  if( iDb>=0 ){
+    assert( iDb<db->nDb );
+    assert( db->aDb[iDb].pBt!=0 || iDb==1 );
+    assert( iDb<SQLITE_MAX_ATTACHED+2 );
+    mask = 1<<iDb;
+    if( (pParse->cookieMask & mask)==0 ){
+      pParse->cookieMask |= mask;
+      pParse->cookieValue[iDb] = db->aDb[iDb].pSchema->schema_cookie;
+      if( !OMIT_TEMPDB && iDb==1 ){
+        sqlite3OpenTempDatabase(pParse);
+      }
+    }
+  }
+}
+
+/*
+** Generate VDBE code that prepares for doing an operation that
+** might change the database.
+**
+** This routine starts a new transaction if we are not already within
+** a transaction.  If we are already within a transaction, then a checkpoint
+** is set if the setStatement parameter is true.  A checkpoint should
+** be set for operations that might fail (due to a constraint) part of
+** the way through and which will need to undo some writes without having to
+** rollback the whole transaction.  For operations where all constraints
+** can be checked before any changes are made to the database, it is never
+** necessary to undo a write and the checkpoint should not be set.
+**
+** Only database iDb and the temp database are made writable by this call.
+** If iDb==0, then the main and temp databases are made writable.  If
+** iDb==1 then only the temp database is made writable.  If iDb>1 then the
+** specified auxiliary database and the temp database are made writable.
+*/
+void sqlite3BeginWriteOperation(Parse *pParse, int setStatement, int iDb){
+  Vdbe *v = sqlite3GetVdbe(pParse);
+  if( v==0 ) return;
+  sqlite3CodeVerifySchema(pParse, iDb);
+  pParse->writeMask |= 1<<iDb;
+  if( setStatement && pParse->nested==0 ){
+    sqlite3VdbeAddOp(v, OP_Statement, iDb, 0);
+  }
+  if( (OMIT_TEMPDB || iDb!=1) && pParse->db->aDb[1].pBt!=0 ){
+    sqlite3BeginWriteOperation(pParse, setStatement, 1);
+  }
+}
+
+/*
+** Check to see if pIndex uses the collating sequence pColl.  Return
+** true if it does and false if it does not.
+*/
+#ifndef SQLITE_OMIT_REINDEX
+static int collationMatch(const char *zColl, Index *pIndex){
+  int i;
+  for(i=0; i<pIndex->nColumn; i++){
+    const char *z = pIndex->azColl[i];
+    if( z==zColl || (z && zColl && 0==sqlite3StrICmp(z, zColl)) ){
+      return 1;
+    }
+  }
+  return 0;
+}
+#endif
+
+/*
+** Recompute all indices of pTab that use the collating sequence pColl.
+** If pColl==0 then recompute all indices of pTab.
+*/
+#ifndef SQLITE_OMIT_REINDEX
+static void reindexTable(Parse *pParse, Table *pTab, char const *zColl){
+  Index *pIndex;              /* An index associated with pTab */
+
+  for(pIndex=pTab->pIndex; pIndex; pIndex=pIndex->pNext){
+    if( zColl==0 || collationMatch(zColl, pIndex) ){
+      int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema);
+      sqlite3BeginWriteOperation(pParse, 0, iDb);
+      sqlite3RefillIndex(pParse, pIndex, -1);
+    }
+  }
+}
+#endif
+
+/*
+** Recompute all indices of all tables in all databases where the
+** indices use the collating sequence pColl.  If pColl==0 then recompute
+** all indices everywhere.
+*/
+#ifndef SQLITE_OMIT_REINDEX
+static void reindexDatabases(Parse *pParse, char const *zColl){
+  Db *pDb;                    /* A single database */
+  int iDb;                    /* The database index number */
+  sqlite3 *db = pParse->db;   /* The database connection */
+  HashElem *k;                /* For looping over tables in pDb */
+  Table *pTab;                /* A table in the database */
+
+  for(iDb=0, pDb=db->aDb; iDb<db->nDb; iDb++, pDb++){
+    assert( pDb!=0 );
+    for(k=sqliteHashFirst(&pDb->pSchema->tblHash);  k; k=sqliteHashNext(k)){
+      pTab = (Table*)sqliteHashData(k);
+      reindexTable(pParse, pTab, zColl);
+    }
+  }
+}
+#endif
+
+/*
+** Generate code for the REINDEX command.
+**
+**        REINDEX                            -- 1
+**        REINDEX  <collation>               -- 2
+**        REINDEX  ?<database>.?<tablename>  -- 3
+**        REINDEX  ?<database>.?<indexname>  -- 4
+**
+** Form 1 causes all indices in all attached databases to be rebuilt.
+** Form 2 rebuilds all indices in all databases that use the named
+** collating function.  Forms 3 and 4 rebuild the named index or all
+** indices associated with the named table.
+*/
+#ifndef SQLITE_OMIT_REINDEX
+void sqlite3Reindex(Parse *pParse, Token *pName1, Token *pName2){
+  CollSeq *pColl;             /* Collating sequence to be reindexed, or NULL */
+  char *z;                    /* Name of a table or index */
+  const char *zDb;            /* Name of the database */
+  Table *pTab;                /* A table in the database */
+  Index *pIndex;              /* An index associated with pTab */
+  int iDb;                    /* The database index number */
+  sqlite3 *db = pParse->db;   /* The database connection */
+  Token *pObjName;            /* Name of the table or index to be reindexed */
+
+  /* Read the database schema. If an error occurs, leave an error message
+  ** and code in pParse and return NULL.
*/ + if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ + return; + } + + if( pName1==0 || pName1->z==0 ){ + reindexDatabases(pParse, 0); + return; + }else if( pName2==0 || pName2->z==0 ){ + assert( pName1->z ); + pColl = sqlite3FindCollSeq(db, ENC(db), (char*)pName1->z, pName1->n, 0); + if( pColl ){ + char *zColl = sqlite3DbStrNDup(db, (const char *)pName1->z, pName1->n); + if( zColl ){ + reindexDatabases(pParse, zColl); + sqlite3_free(zColl); + } + return; + } + } + iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pObjName); + if( iDb<0 ) return; + z = sqlite3NameFromToken(db, pObjName); + if( z==0 ) return; + zDb = db->aDb[iDb].zName; + pTab = sqlite3FindTable(db, z, zDb); + if( pTab ){ + reindexTable(pParse, pTab, 0); + sqlite3_free(z); + return; + } + pIndex = sqlite3FindIndex(db, z, zDb); + sqlite3_free(z); + if( pIndex ){ + sqlite3BeginWriteOperation(pParse, 0, iDb); + sqlite3RefillIndex(pParse, pIndex, -1); + return; + } + sqlite3ErrorMsg(pParse, "unable to identify the object to be reindexed"); +} +#endif + +/* +** Return a dynamicly allocated KeyInfo structure that can be used +** with OP_OpenRead or OP_OpenWrite to access database index pIdx. +** +** If successful, a pointer to the new structure is returned. In this case +** the caller is responsible for calling sqlite3_free() on the returned +** pointer. If an error occurs (out of memory or missing collation +** sequence), NULL is returned and the state of pParse updated to reflect +** the error. +*/ +KeyInfo *sqlite3IndexKeyinfo(Parse *pParse, Index *pIdx){ + int i; + int nCol = pIdx->nColumn; + int nBytes = sizeof(KeyInfo) + (nCol-1)*sizeof(CollSeq*) + nCol; + KeyInfo *pKey = (KeyInfo *)sqlite3DbMallocZero(pParse->db, nBytes); + + if( pKey ){ + pKey->db = pParse->db; + pKey->aSortOrder = (u8 *)&(pKey->aColl[nCol]); + assert( &pKey->aSortOrder[nCol]==&(((u8 *)pKey)[nBytes]) ); + for(i=0; iazColl[i]; + assert( zColl ); + pKey->aColl[i] = sqlite3LocateCollSeq(pParse, zColl, -1); + pKey->aSortOrder[i] = pIdx->aSortOrder[i]; + } + pKey->nField = nCol; + } + + if( pParse->nErr ){ + sqlite3_free(pKey); + pKey = 0; + } + return pKey; +} diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/callback.c b/libraries/sqlite/unix/sqlite-3.5.1/src/callback.c new file mode 100644 index 0000000000..009cfd740e --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/callback.c @@ -0,0 +1,378 @@ +/* +** 2005 May 23 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains functions used to access the internal hash tables +** of user defined functions and collation sequences. +** +** $Id: callback.c,v 1.23 2007/08/29 12:31:26 danielk1977 Exp $ +*/ + +#include "sqliteInt.h" + +/* +** Invoke the 'collation needed' callback to request a collation sequence +** in the database text encoding of name zName, length nName. 
+** If the collation sequence +*/ +static void callCollNeeded(sqlite3 *db, const char *zName, int nName){ + assert( !db->xCollNeeded || !db->xCollNeeded16 ); + if( nName<0 ) nName = strlen(zName); + if( db->xCollNeeded ){ + char *zExternal = sqlite3DbStrNDup(db, zName, nName); + if( !zExternal ) return; + db->xCollNeeded(db->pCollNeededArg, db, (int)ENC(db), zExternal); + sqlite3_free(zExternal); + } +#ifndef SQLITE_OMIT_UTF16 + if( db->xCollNeeded16 ){ + char const *zExternal; + sqlite3_value *pTmp = sqlite3ValueNew(db); + sqlite3ValueSetStr(pTmp, nName, zName, SQLITE_UTF8, SQLITE_STATIC); + zExternal = sqlite3ValueText(pTmp, SQLITE_UTF16NATIVE); + if( zExternal ){ + db->xCollNeeded16(db->pCollNeededArg, db, (int)ENC(db), zExternal); + } + sqlite3ValueFree(pTmp); + } +#endif +} + +/* +** This routine is called if the collation factory fails to deliver a +** collation function in the best encoding but there may be other versions +** of this collation function (for other text encodings) available. Use one +** of these instead if they exist. Avoid a UTF-8 <-> UTF-16 conversion if +** possible. +*/ +static int synthCollSeq(sqlite3 *db, CollSeq *pColl){ + CollSeq *pColl2; + char *z = pColl->zName; + int n = strlen(z); + int i; + static const u8 aEnc[] = { SQLITE_UTF16BE, SQLITE_UTF16LE, SQLITE_UTF8 }; + for(i=0; i<3; i++){ + pColl2 = sqlite3FindCollSeq(db, aEnc[i], z, n, 0); + if( pColl2->xCmp!=0 ){ + memcpy(pColl, pColl2, sizeof(CollSeq)); + pColl->xDel = 0; /* Do not copy the destructor */ + return SQLITE_OK; + } + } + return SQLITE_ERROR; +} + +/* +** This function is responsible for invoking the collation factory callback +** or substituting a collation sequence of a different encoding when the +** requested collation sequence is not available in the database native +** encoding. +** +** If it is not NULL, then pColl must point to the database native encoding +** collation sequence with name zName, length nName. +** +** The return value is either the collation sequence to be used in database +** db for collation type name zName, length nName, or NULL, if no collation +** sequence can be found. +*/ +CollSeq *sqlite3GetCollSeq( + sqlite3* db, + CollSeq *pColl, + const char *zName, + int nName +){ + CollSeq *p; + + p = pColl; + if( !p ){ + p = sqlite3FindCollSeq(db, ENC(db), zName, nName, 0); + } + if( !p || !p->xCmp ){ + /* No collation sequence of this type for this encoding is registered. + ** Call the collation factory to see if it can supply us with one. + */ + callCollNeeded(db, zName, nName); + p = sqlite3FindCollSeq(db, ENC(db), zName, nName, 0); + } + if( p && !p->xCmp && synthCollSeq(db, p) ){ + p = 0; + } + assert( !p || p->xCmp ); + return p; +} + +/* +** This routine is called on a collation sequence before it is used to +** check that it is defined. An undefined collation sequence exists when +** a database is loaded that contains references to collation sequences +** that have not been defined by sqlite3_create_collation() etc. +** +** If required, this routine calls the 'collation needed' callback to +** request a definition of the collating sequence. If this doesn't work, +** an equivalent collating sequence that uses a text encoding different +** from the main database is substituted, if one is available. 
+*/ +int sqlite3CheckCollSeq(Parse *pParse, CollSeq *pColl){ + if( pColl ){ + const char *zName = pColl->zName; + CollSeq *p = sqlite3GetCollSeq(pParse->db, pColl, zName, -1); + if( !p ){ + if( pParse->nErr==0 ){ + sqlite3ErrorMsg(pParse, "no such collation sequence: %s", zName); + } + pParse->nErr++; + return SQLITE_ERROR; + } + assert( p==pColl ); + } + return SQLITE_OK; +} + + + +/* +** Locate and return an entry from the db.aCollSeq hash table. If the entry +** specified by zName and nName is not found and parameter 'create' is +** true, then create a new entry. Otherwise return NULL. +** +** Each pointer stored in the sqlite3.aCollSeq hash table contains an +** array of three CollSeq structures. The first is the collation sequence +** prefferred for UTF-8, the second UTF-16le, and the third UTF-16be. +** +** Stored immediately after the three collation sequences is a copy of +** the collation sequence name. A pointer to this string is stored in +** each collation sequence structure. +*/ +static CollSeq *findCollSeqEntry( + sqlite3 *db, + const char *zName, + int nName, + int create +){ + CollSeq *pColl; + if( nName<0 ) nName = strlen(zName); + pColl = sqlite3HashFind(&db->aCollSeq, zName, nName); + + if( 0==pColl && create ){ + pColl = sqlite3DbMallocZero(db, 3*sizeof(*pColl) + nName + 1 ); + if( pColl ){ + CollSeq *pDel = 0; + pColl[0].zName = (char*)&pColl[3]; + pColl[0].enc = SQLITE_UTF8; + pColl[1].zName = (char*)&pColl[3]; + pColl[1].enc = SQLITE_UTF16LE; + pColl[2].zName = (char*)&pColl[3]; + pColl[2].enc = SQLITE_UTF16BE; + memcpy(pColl[0].zName, zName, nName); + pColl[0].zName[nName] = 0; + pDel = sqlite3HashInsert(&db->aCollSeq, pColl[0].zName, nName, pColl); + + /* If a malloc() failure occured in sqlite3HashInsert(), it will + ** return the pColl pointer to be deleted (because it wasn't added + ** to the hash table). + */ + assert( pDel==0 || pDel==pColl ); + if( pDel!=0 ){ + db->mallocFailed = 1; + sqlite3_free(pDel); + pColl = 0; + } + } + } + return pColl; +} + +/* +** Parameter zName points to a UTF-8 encoded string nName bytes long. +** Return the CollSeq* pointer for the collation sequence named zName +** for the encoding 'enc' from the database 'db'. +** +** If the entry specified is not found and 'create' is true, then create a +** new entry. Otherwise return NULL. +** +** A separate function sqlite3LocateCollSeq() is a wrapper around +** this routine. sqlite3LocateCollSeq() invokes the collation factory +** if necessary and generates an error message if the collating sequence +** cannot be found. +*/ +CollSeq *sqlite3FindCollSeq( + sqlite3 *db, + u8 enc, + const char *zName, + int nName, + int create +){ + CollSeq *pColl; + if( zName ){ + pColl = findCollSeqEntry(db, zName, nName, create); + }else{ + pColl = db->pDfltColl; + } + assert( SQLITE_UTF8==1 && SQLITE_UTF16LE==2 && SQLITE_UTF16BE==3 ); + assert( enc>=SQLITE_UTF8 && enc<=SQLITE_UTF16BE ); + if( pColl ) pColl += enc-1; + return pColl; +} + +/* +** Locate a user function given a name, a number of arguments and a flag +** indicating whether the function prefers UTF-16 over UTF-8. Return a +** pointer to the FuncDef structure that defines that function, or return +** NULL if the function does not exist. +** +** If the createFlag argument is true, then a new (blank) FuncDef +** structure is created and liked into the "db" structure if a +** no matching function previously existed. 
When createFlag is true +** and the nArg parameter is -1, then only a function that accepts +** any number of arguments will be returned. +** +** If createFlag is false and nArg is -1, then the first valid +** function found is returned. A function is valid if either xFunc +** or xStep is non-zero. +** +** If createFlag is false, then a function with the required name and +** number of arguments may be returned even if the eTextRep flag does not +** match that requested. +*/ +FuncDef *sqlite3FindFunction( + sqlite3 *db, /* An open database */ + const char *zName, /* Name of the function. Not null-terminated */ + int nName, /* Number of characters in the name */ + int nArg, /* Number of arguments. -1 means any number */ + u8 enc, /* Preferred text encoding */ + int createFlag /* Create new entry if true and does not otherwise exist */ +){ + FuncDef *p; /* Iterator variable */ + FuncDef *pFirst; /* First function with this name */ + FuncDef *pBest = 0; /* Best match found so far */ + int bestmatch = 0; + + + assert( enc==SQLITE_UTF8 || enc==SQLITE_UTF16LE || enc==SQLITE_UTF16BE ); + if( nArg<-1 ) nArg = -1; + + pFirst = (FuncDef*)sqlite3HashFind(&db->aFunc, zName, nName); + for(p=pFirst; p; p=p->pNext){ + /* During the search for the best function definition, bestmatch is set + ** as follows to indicate the quality of the match with the definition + ** pointed to by pBest: + ** + ** 0: pBest is NULL. No match has been found. + ** 1: A variable arguments function that prefers UTF-8 when a UTF-16 + ** encoding is requested, or vice versa. + ** 2: A variable arguments function that uses UTF-16BE when UTF-16LE is + ** requested, or vice versa. + ** 3: A variable arguments function using the same text encoding. + ** 4: A function with the exact number of arguments requested that + ** prefers UTF-8 when a UTF-16 encoding is requested, or vice versa. + ** 5: A function with the exact number of arguments requested that + ** prefers UTF-16LE when UTF-16BE is requested, or vice versa. + ** 6: An exact match. + ** + ** A larger value of 'matchqual' indicates a more desirable match. + */ + if( p->nArg==-1 || p->nArg==nArg || nArg==-1 ){ + int match = 1; /* Quality of this match */ + if( p->nArg==nArg || nArg==-1 ){ + match = 4; + } + if( enc==p->iPrefEnc ){ + match += 2; + } + else if( (enc==SQLITE_UTF16LE && p->iPrefEnc==SQLITE_UTF16BE) || + (enc==SQLITE_UTF16BE && p->iPrefEnc==SQLITE_UTF16LE) ){ + match += 1; + } + + if( match>bestmatch ){ + pBest = p; + bestmatch = match; + } + } + } + + /* If the createFlag parameter is true, and the seach did not reveal an + ** exact match for the name, number of arguments and encoding, then add a + ** new entry to the hash table and return it. + */ + if( createFlag && bestmatch<6 && + (pBest = sqlite3DbMallocZero(db, sizeof(*pBest)+nName))!=0 ){ + pBest->nArg = nArg; + pBest->pNext = pFirst; + pBest->iPrefEnc = enc; + memcpy(pBest->zName, zName, nName); + pBest->zName[nName] = 0; + if( pBest==sqlite3HashInsert(&db->aFunc,pBest->zName,nName,(void*)pBest) ){ + db->mallocFailed = 1; + sqlite3_free(pBest); + return 0; + } + } + + if( pBest && (pBest->xStep || pBest->xFunc || createFlag) ){ + return pBest; + } + return 0; +} + +/* +** Free all resources held by the schema structure. The void* argument points +** at a Schema struct. This function does not call sqlite3_free() on the +** pointer itself, it just cleans up subsiduary resources (i.e. the contents +** of the schema hash tables). 
+*/ +void sqlite3SchemaFree(void *p){ + Hash temp1; + Hash temp2; + HashElem *pElem; + Schema *pSchema = (Schema *)p; + + temp1 = pSchema->tblHash; + temp2 = pSchema->trigHash; + sqlite3HashInit(&pSchema->trigHash, SQLITE_HASH_STRING, 0); + sqlite3HashClear(&pSchema->aFKey); + sqlite3HashClear(&pSchema->idxHash); + for(pElem=sqliteHashFirst(&temp2); pElem; pElem=sqliteHashNext(pElem)){ + sqlite3DeleteTrigger((Trigger*)sqliteHashData(pElem)); + } + sqlite3HashClear(&temp2); + sqlite3HashInit(&pSchema->tblHash, SQLITE_HASH_STRING, 0); + for(pElem=sqliteHashFirst(&temp1); pElem; pElem=sqliteHashNext(pElem)){ + Table *pTab = sqliteHashData(pElem); + sqlite3DeleteTable(pTab); + } + sqlite3HashClear(&temp1); + pSchema->pSeqTab = 0; + pSchema->flags &= ~DB_SchemaLoaded; +} + +/* +** Find and return the schema associated with a BTree. Create +** a new one if necessary. +*/ +Schema *sqlite3SchemaGet(sqlite3 *db, Btree *pBt){ + Schema * p; + if( pBt ){ + p = (Schema *)sqlite3BtreeSchema(pBt, sizeof(Schema), sqlite3SchemaFree); + }else{ + p = (Schema *)sqlite3MallocZero(sizeof(Schema)); + } + if( !p ){ + db->mallocFailed = 1; + }else if ( 0==p->file_format ){ + sqlite3HashInit(&p->tblHash, SQLITE_HASH_STRING, 0); + sqlite3HashInit(&p->idxHash, SQLITE_HASH_STRING, 0); + sqlite3HashInit(&p->trigHash, SQLITE_HASH_STRING, 0); + sqlite3HashInit(&p->aFKey, SQLITE_HASH_STRING, 1); + p->enc = SQLITE_UTF8; + } + return p; +} diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/complete.c b/libraries/sqlite/unix/sqlite-3.5.1/src/complete.c new file mode 100644 index 0000000000..ae61d8ab06 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/complete.c @@ -0,0 +1,271 @@ +/* +** 2001 September 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** An tokenizer for SQL +** +** This file contains C code that implements the sqlite3_complete() API. +** This code used to be part of the tokenizer.c source file. But by +** separating it out, the code will be automatically omitted from +** static links that do not use it. +** +** $Id: complete.c,v 1.6 2007/08/27 23:26:59 drh Exp $ +*/ +#include "sqliteInt.h" +#ifndef SQLITE_OMIT_COMPLETE + +/* +** This is defined in tokenize.c. We just have to import the definition. +*/ +#ifndef SQLITE_AMALGAMATION +#ifdef SQLITE_ASCII +extern const char sqlite3IsAsciiIdChar[]; +#define IdChar(C) (((c=C)&0x80)!=0 || (c>0x1f && sqlite3IsAsciiIdChar[c-0x20])) +#endif +#ifdef SQLITE_EBCDIC +extern const char sqlite3IsEbcdicIdChar[]; +#define IdChar(C) (((c=C)>=0x42 && sqlite3IsEbcdicIdChar[c-0x40])) +#endif +#endif /* SQLITE_AMALGAMATION */ + + +/* +** Token types used by the sqlite3_complete() routine. See the header +** comments on that procedure for additional information. +*/ +#define tkSEMI 0 +#define tkWS 1 +#define tkOTHER 2 +#define tkEXPLAIN 3 +#define tkCREATE 4 +#define tkTEMP 5 +#define tkTRIGGER 6 +#define tkEND 7 + +/* +** Return TRUE if the given SQL string ends in a semicolon. +** +** Special handling is require for CREATE TRIGGER statements. +** Whenever the CREATE TRIGGER keywords are seen, the statement +** must end with ";END;". +** +** This implementation uses a state machine with 7 states: +** +** (0) START At the beginning or end of an SQL statement. 
This routine +** returns 1 if it ends in the START state and 0 if it ends +** in any other state. +** +** (1) NORMAL We are in the middle of statement which ends with a single +** semicolon. +** +** (2) EXPLAIN The keyword EXPLAIN has been seen at the beginning of +** a statement. +** +** (3) CREATE The keyword CREATE has been seen at the beginning of a +** statement, possibly preceeded by EXPLAIN and/or followed by +** TEMP or TEMPORARY +** +** (4) TRIGGER We are in the middle of a trigger definition that must be +** ended by a semicolon, the keyword END, and another semicolon. +** +** (5) SEMI We've seen the first semicolon in the ";END;" that occurs at +** the end of a trigger definition. +** +** (6) END We've seen the ";END" of the ";END;" that occurs at the end +** of a trigger difinition. +** +** Transitions between states above are determined by tokens extracted +** from the input. The following tokens are significant: +** +** (0) tkSEMI A semicolon. +** (1) tkWS Whitespace +** (2) tkOTHER Any other SQL token. +** (3) tkEXPLAIN The "explain" keyword. +** (4) tkCREATE The "create" keyword. +** (5) tkTEMP The "temp" or "temporary" keyword. +** (6) tkTRIGGER The "trigger" keyword. +** (7) tkEND The "end" keyword. +** +** Whitespace never causes a state transition and is always ignored. +** +** If we compile with SQLITE_OMIT_TRIGGER, all of the computation needed +** to recognize the end of a trigger can be omitted. All we have to do +** is look for a semicolon that is not part of an string or comment. +*/ +int sqlite3_complete(const char *zSql){ + u8 state = 0; /* Current state, using numbers defined in header comment */ + u8 token; /* Value of the next token */ + +#ifndef SQLITE_OMIT_TRIGGER + /* A complex statement machine used to detect the end of a CREATE TRIGGER + ** statement. This is the normal case. + */ + static const u8 trans[7][8] = { + /* Token: */ + /* State: ** SEMI WS OTHER EXPLAIN CREATE TEMP TRIGGER END */ + /* 0 START: */ { 0, 0, 1, 2, 3, 1, 1, 1, }, + /* 1 NORMAL: */ { 0, 1, 1, 1, 1, 1, 1, 1, }, + /* 2 EXPLAIN: */ { 0, 2, 1, 1, 3, 1, 1, 1, }, + /* 3 CREATE: */ { 0, 3, 1, 1, 1, 3, 4, 1, }, + /* 4 TRIGGER: */ { 5, 4, 4, 4, 4, 4, 4, 4, }, + /* 5 SEMI: */ { 5, 5, 4, 4, 4, 4, 4, 6, }, + /* 6 END: */ { 0, 6, 4, 4, 4, 4, 4, 4, }, + }; +#else + /* If triggers are not suppored by this compile then the statement machine + ** used to detect the end of a statement is much simplier + */ + static const u8 trans[2][3] = { + /* Token: */ + /* State: ** SEMI WS OTHER */ + /* 0 START: */ { 0, 0, 1, }, + /* 1 NORMAL: */ { 0, 1, 1, }, + }; +#endif /* SQLITE_OMIT_TRIGGER */ + + while( *zSql ){ + switch( *zSql ){ + case ';': { /* A semicolon */ + token = tkSEMI; + break; + } + case ' ': + case '\r': + case '\t': + case '\n': + case '\f': { /* White space is ignored */ + token = tkWS; + break; + } + case '/': { /* C-style comments */ + if( zSql[1]!='*' ){ + token = tkOTHER; + break; + } + zSql += 2; + while( zSql[0] && (zSql[0]!='*' || zSql[1]!='/') ){ zSql++; } + if( zSql[0]==0 ) return 0; + zSql++; + token = tkWS; + break; + } + case '-': { /* SQL-style comments from "--" to end of line */ + if( zSql[1]!='-' ){ + token = tkOTHER; + break; + } + while( *zSql && *zSql!='\n' ){ zSql++; } + if( *zSql==0 ) return state==0; + token = tkWS; + break; + } + case '[': { /* Microsoft-style identifiers in [...] 
*/ + zSql++; + while( *zSql && *zSql!=']' ){ zSql++; } + if( *zSql==0 ) return 0; + token = tkOTHER; + break; + } + case '`': /* Grave-accent quoted symbols used by MySQL */ + case '"': /* single- and double-quoted strings */ + case '\'': { + int c = *zSql; + zSql++; + while( *zSql && *zSql!=c ){ zSql++; } + if( *zSql==0 ) return 0; + token = tkOTHER; + break; + } + default: { + int c; + if( IdChar((u8)*zSql) ){ + /* Keywords and unquoted identifiers */ + int nId; + for(nId=1; IdChar(zSql[nId]); nId++){} +#ifdef SQLITE_OMIT_TRIGGER + token = tkOTHER; +#else + switch( *zSql ){ + case 'c': case 'C': { + if( nId==6 && sqlite3StrNICmp(zSql, "create", 6)==0 ){ + token = tkCREATE; + }else{ + token = tkOTHER; + } + break; + } + case 't': case 'T': { + if( nId==7 && sqlite3StrNICmp(zSql, "trigger", 7)==0 ){ + token = tkTRIGGER; + }else if( nId==4 && sqlite3StrNICmp(zSql, "temp", 4)==0 ){ + token = tkTEMP; + }else if( nId==9 && sqlite3StrNICmp(zSql, "temporary", 9)==0 ){ + token = tkTEMP; + }else{ + token = tkOTHER; + } + break; + } + case 'e': case 'E': { + if( nId==3 && sqlite3StrNICmp(zSql, "end", 3)==0 ){ + token = tkEND; + }else +#ifndef SQLITE_OMIT_EXPLAIN + if( nId==7 && sqlite3StrNICmp(zSql, "explain", 7)==0 ){ + token = tkEXPLAIN; + }else +#endif + { + token = tkOTHER; + } + break; + } + default: { + token = tkOTHER; + break; + } + } +#endif /* SQLITE_OMIT_TRIGGER */ + zSql += nId-1; + }else{ + /* Operators and special symbols */ + token = tkOTHER; + } + break; + } + } + state = trans[state][token]; + zSql++; + } + return state==0; +} + +#ifndef SQLITE_OMIT_UTF16 +/* +** This routine is the same as the sqlite3_complete() routine described +** above, except that the parameter is required to be UTF-16 encoded, not +** UTF-8. +*/ +int sqlite3_complete16(const void *zSql){ + sqlite3_value *pVal; + char const *zSql8; + int rc = SQLITE_NOMEM; + + pVal = sqlite3ValueNew(0); + sqlite3ValueSetStr(pVal, -1, zSql, SQLITE_UTF16NATIVE, SQLITE_STATIC); + zSql8 = sqlite3ValueText(pVal, SQLITE_UTF8); + if( zSql8 ){ + rc = sqlite3_complete(zSql8); + } + sqlite3ValueFree(pVal); + return sqlite3ApiExit(0, rc); +} +#endif /* SQLITE_OMIT_UTF16 */ +#endif /* SQLITE_OMIT_COMPLETE */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/date.c b/libraries/sqlite/unix/sqlite-3.5.1/src/date.c new file mode 100644 index 0000000000..ce1c6c7b39 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/date.c @@ -0,0 +1,1045 @@ +/* +** 2003 October 31 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the C functions that implement date and time +** functions for SQLite. +** +** There is only one exported symbol in this file - the function +** sqlite3RegisterDateTimeFunctions() found at the bottom of the file. +** All other code has file scope. +** +** $Id: date.c,v 1.73 2007/09/12 17:01:45 danielk1977 Exp $ +** +** SQLite processes all times and dates as Julian Day numbers. The +** dates and times are stored as the number of days since noon +** in Greenwich on November 24, 4714 B.C. according to the Gregorian +** calendar system. 
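+**
+** A stored Julian Day value rJD converts to seconds since the Unix epoch
+** as
+**
+**     unixepoch = (rJD - 2440587.5)*86400.0
+**
+** (2440587.5 being the Julian Day of the Unix epoch, as listed below); for
+** example JD 2451544.5 works out to 946684800, the Unix time for the start
+** of 2000-01-01 UTC.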
+** +** 1970-01-01 00:00:00 is JD 2440587.5 +** 2000-01-01 00:00:00 is JD 2451544.5 +** +** This implemention requires years to be expressed as a 4-digit number +** which means that only dates between 0000-01-01 and 9999-12-31 can +** be represented, even though julian day numbers allow a much wider +** range of dates. +** +** The Gregorian calendar system is used for all dates and times, +** even those that predate the Gregorian calendar. Historians usually +** use the Julian calendar for dates prior to 1582-10-15 and for some +** dates afterwards, depending on locale. Beware of this difference. +** +** The conversion algorithms are implemented based on descriptions +** in the following text: +** +** Jean Meeus +** Astronomical Algorithms, 2nd Edition, 1998 +** ISBM 0-943396-61-1 +** Willmann-Bell, Inc +** Richmond, Virginia (USA) +*/ +#include "sqliteInt.h" +#include +#include +#include +#include + +#ifndef SQLITE_OMIT_DATETIME_FUNCS + +/* +** A structure for holding a single date and time. +*/ +typedef struct DateTime DateTime; +struct DateTime { + double rJD; /* The julian day number */ + int Y, M, D; /* Year, month, and day */ + int h, m; /* Hour and minutes */ + int tz; /* Timezone offset in minutes */ + double s; /* Seconds */ + char validYMD; /* True if Y,M,D are valid */ + char validHMS; /* True if h,m,s are valid */ + char validJD; /* True if rJD is valid */ + char validTZ; /* True if tz is valid */ +}; + + +/* +** Convert zDate into one or more integers. Additional arguments +** come in groups of 5 as follows: +** +** N number of digits in the integer +** min minimum allowed value of the integer +** max maximum allowed value of the integer +** nextC first character after the integer +** pVal where to write the integers value. +** +** Conversions continue until one with nextC==0 is encountered. +** The function returns the number of successful conversions. +*/ +static int getDigits(const char *zDate, ...){ + va_list ap; + int val; + int N; + int min; + int max; + int nextC; + int *pVal; + int cnt = 0; + va_start(ap, zDate); + do{ + N = va_arg(ap, int); + min = va_arg(ap, int); + max = va_arg(ap, int); + nextC = va_arg(ap, int); + pVal = va_arg(ap, int*); + val = 0; + while( N-- ){ + if( !isdigit(*(u8*)zDate) ){ + goto end_getDigits; + } + val = val*10 + *zDate - '0'; + zDate++; + } + if( valmax || (nextC!=0 && nextC!=*zDate) ){ + goto end_getDigits; + } + *pVal = val; + zDate++; + cnt++; + }while( nextC ); +end_getDigits: + va_end(ap); + return cnt; +} + +/* +** Read text from z[] and convert into a floating point number. Return +** the number of digits converted. +*/ +#define getValue sqlite3AtoF + +/* +** Parse a timezone extension on the end of a date-time. +** The extension is of the form: +** +** (+/-)HH:MM +** +** If the parse is successful, write the number of minutes +** of change in *pnMin and return 0. If a parser error occurs, +** return 0. +** +** A missing specifier is not considered an error. +*/ +static int parseTimezone(const char *zDate, DateTime *p){ + int sgn = 0; + int nHr, nMn; + while( isspace(*(u8*)zDate) ){ zDate++; } + p->tz = 0; + if( *zDate=='-' ){ + sgn = -1; + }else if( *zDate=='+' ){ + sgn = +1; + }else{ + return *zDate!=0; + } + zDate++; + if( getDigits(zDate, 2, 0, 14, ':', &nHr, 2, 0, 59, 0, &nMn)!=2 ){ + return 1; + } + zDate += 5; + p->tz = sgn*(nMn + nHr*60); + while( isspace(*(u8*)zDate) ){ zDate++; } + return *zDate!=0; +} + +/* +** Parse times of the form HH:MM or HH:MM:SS or HH:MM:SS.FFFF. 
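To make the parseTimezone() convention above concrete, here is a stand-alone toy that converts a "(+/-)HH:MM" suffix into a signed offset in minutes. The name tzToMinutes is invented, and the toy is deliberately stricter and simpler than the real parser (which also skips surrounding whitespace); it only illustrates the arithmetic.

#include <stdio.h>
#include <ctype.h>

/* Convert a "(+/-)HH:MM" timezone suffix into signed minutes.
** Returns 0 on success, 1 on a malformed suffix. Illustrative only. */
static int tzToMinutes(const char *z, int *pMin){
  int sgn, hh, mm;
  if( z[0]=='+' ) sgn = 1;
  else if( z[0]=='-' ) sgn = -1;
  else return 1;
  if( !isdigit((unsigned char)z[1]) || !isdigit((unsigned char)z[2]) ) return 1;
  if( z[3]!=':' ) return 1;
  if( !isdigit((unsigned char)z[4]) || !isdigit((unsigned char)z[5]) ) return 1;
  hh = (z[1]-'0')*10 + (z[2]-'0');
  mm = (z[4]-'0')*10 + (z[5]-'0');
  if( hh>14 || mm>59 ) return 1;            /* same bounds as getDigits() above */
  *pMin = sgn*(hh*60 + mm);
  return 0;
}

int main(void){
  int m;
  if( tzToMinutes("+05:30", &m)==0 ) printf("%d\n", m);   /* prints 330 */
  if( tzToMinutes("-08:00", &m)==0 ) printf("%d\n", m);   /* prints -480 */
  return 0;
}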
+** The HH, MM, and SS must each be exactly 2 digits. The +** fractional seconds FFFF can be one or more digits. +** +** Return 1 if there is a parsing error and 0 on success. +*/ +static int parseHhMmSs(const char *zDate, DateTime *p){ + int h, m, s; + double ms = 0.0; + if( getDigits(zDate, 2, 0, 24, ':', &h, 2, 0, 59, 0, &m)!=2 ){ + return 1; + } + zDate += 5; + if( *zDate==':' ){ + zDate++; + if( getDigits(zDate, 2, 0, 59, 0, &s)!=1 ){ + return 1; + } + zDate += 2; + if( *zDate=='.' && isdigit((u8)zDate[1]) ){ + double rScale = 1.0; + zDate++; + while( isdigit(*(u8*)zDate) ){ + ms = ms*10.0 + *zDate - '0'; + rScale *= 10.0; + zDate++; + } + ms /= rScale; + } + }else{ + s = 0; + } + p->validJD = 0; + p->validHMS = 1; + p->h = h; + p->m = m; + p->s = s + ms; + if( parseTimezone(zDate, p) ) return 1; + p->validTZ = p->tz!=0; + return 0; +} + +/* +** Convert from YYYY-MM-DD HH:MM:SS to julian day. We always assume +** that the YYYY-MM-DD is according to the Gregorian calendar. +** +** Reference: Meeus page 61 +*/ +static void computeJD(DateTime *p){ + int Y, M, D, A, B, X1, X2; + + if( p->validJD ) return; + if( p->validYMD ){ + Y = p->Y; + M = p->M; + D = p->D; + }else{ + Y = 2000; /* If no YMD specified, assume 2000-Jan-01 */ + M = 1; + D = 1; + } + if( M<=2 ){ + Y--; + M += 12; + } + A = Y/100; + B = 2 - A + (A/4); + X1 = 365.25*(Y+4716); + X2 = 30.6001*(M+1); + p->rJD = X1 + X2 + D + B - 1524.5; + p->validJD = 1; + if( p->validHMS ){ + p->rJD += (p->h*3600.0 + p->m*60.0 + p->s)/86400.0; + if( p->validTZ ){ + p->rJD -= p->tz*60/86400.0; + p->validYMD = 0; + p->validHMS = 0; + p->validTZ = 0; + } + } +} + +/* +** Parse dates of the form +** +** YYYY-MM-DD HH:MM:SS.FFF +** YYYY-MM-DD HH:MM:SS +** YYYY-MM-DD HH:MM +** YYYY-MM-DD +** +** Write the result into the DateTime structure and return 0 +** on success and 1 if the input string is not a well-formed +** date. +*/ +static int parseYyyyMmDd(const char *zDate, DateTime *p){ + int Y, M, D, neg; + + if( zDate[0]=='-' ){ + zDate++; + neg = 1; + }else{ + neg = 0; + } + if( getDigits(zDate,4,0,9999,'-',&Y,2,1,12,'-',&M,2,1,31,0,&D)!=3 ){ + return 1; + } + zDate += 10; + while( isspace(*(u8*)zDate) || 'T'==*(u8*)zDate ){ zDate++; } + if( parseHhMmSs(zDate, p)==0 ){ + /* We got the time */ + }else if( *zDate==0 ){ + p->validHMS = 0; + }else{ + return 1; + } + p->validJD = 0; + p->validYMD = 1; + p->Y = neg ? -Y : Y; + p->M = M; + p->D = D; + if( p->validTZ ){ + computeJD(p); + } + return 0; +} + +/* +** Attempt to parse the given string into a Julian Day Number. Return +** the number of errors. +** +** The following are acceptable forms for the input string: +** +** YYYY-MM-DD HH:MM:SS.FFF +/-HH:MM +** DDDD.DD +** now +** +** In the first form, the +/-HH:MM is always optional. The fractional +** seconds extension (the ".FFF") is optional. The seconds portion +** (":SS.FFF") is option. The year and date can be omitted as long +** as there is a time string. The time string can be omitted as long +** as there is a year and date. 
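The Meeus arithmetic used by computeJD() above can be checked in isolation. The sketch below repeats the formula for a date at 00:00:00 UTC and prints the two reference values quoted in the file header; it is an illustration, not library code, and the name julianDay is invented.

#include <stdio.h>

/* Stand-alone version of the Meeus formula used by computeJD():
** convert a Gregorian Y-M-D (at 00:00:00 UTC) into a Julian Day number. */
static double julianDay(int Y, int M, int D){
  int A, B, X1, X2;
  if( M<=2 ){ Y--; M += 12; }
  A = Y/100;
  B = 2 - A + (A/4);
  X1 = (int)(365.25*(Y+4716));
  X2 = (int)(30.6001*(M+1));
  return X1 + X2 + D + B - 1524.5;
}

int main(void){
  /* Should print 2440587.5 and 2451544.5, matching the header comment. */
  printf("%.1f\n", julianDay(1970,1,1));
  printf("%.1f\n", julianDay(2000,1,1));
  return 0;
}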
+*/ +static int parseDateOrTime( + sqlite3_context *context, + const char *zDate, + DateTime *p +){ + memset(p, 0, sizeof(*p)); + if( parseYyyyMmDd(zDate,p)==0 ){ + return 0; + }else if( parseHhMmSs(zDate, p)==0 ){ + return 0; + }else if( sqlite3StrICmp(zDate,"now")==0){ + double r; + sqlite3OsCurrentTime((sqlite3_vfs *)sqlite3_user_data(context), &r); + p->rJD = r; + p->validJD = 1; + return 0; + }else if( sqlite3IsNumber(zDate, 0, SQLITE_UTF8) ){ + getValue(zDate, &p->rJD); + p->validJD = 1; + return 0; + } + return 1; +} + +/* +** Compute the Year, Month, and Day from the julian day number. +*/ +static void computeYMD(DateTime *p){ + int Z, A, B, C, D, E, X1; + if( p->validYMD ) return; + if( !p->validJD ){ + p->Y = 2000; + p->M = 1; + p->D = 1; + }else{ + Z = p->rJD + 0.5; + A = (Z - 1867216.25)/36524.25; + A = Z + 1 + A - (A/4); + B = A + 1524; + C = (B - 122.1)/365.25; + D = 365.25*C; + E = (B-D)/30.6001; + X1 = 30.6001*E; + p->D = B - D - X1; + p->M = E<14 ? E-1 : E-13; + p->Y = p->M>2 ? C - 4716 : C - 4715; + } + p->validYMD = 1; +} + +/* +** Compute the Hour, Minute, and Seconds from the julian day number. +*/ +static void computeHMS(DateTime *p){ + int Z, s; + if( p->validHMS ) return; + computeJD(p); + Z = p->rJD + 0.5; + s = (p->rJD + 0.5 - Z)*86400000.0 + 0.5; + p->s = 0.001*s; + s = p->s; + p->s -= s; + p->h = s/3600; + s -= p->h*3600; + p->m = s/60; + p->s += s - p->m*60; + p->validHMS = 1; +} + +/* +** Compute both YMD and HMS +*/ +static void computeYMD_HMS(DateTime *p){ + computeYMD(p); + computeHMS(p); +} + +/* +** Clear the YMD and HMS and the TZ +*/ +static void clearYMD_HMS_TZ(DateTime *p){ + p->validYMD = 0; + p->validHMS = 0; + p->validTZ = 0; +} + +/* +** Compute the difference (in days) between localtime and UTC (a.k.a. GMT) +** for the time value p where p is in UTC. +*/ +static double localtimeOffset(DateTime *p){ + DateTime x, y; + time_t t; + x = *p; + computeYMD_HMS(&x); + if( x.Y<1971 || x.Y>=2038 ){ + x.Y = 2000; + x.M = 1; + x.D = 1; + x.h = 0; + x.m = 0; + x.s = 0.0; + } else { + int s = x.s + 0.5; + x.s = s; + } + x.tz = 0; + x.validJD = 0; + computeJD(&x); + t = (x.rJD-2440587.5)*86400.0 + 0.5; +#ifdef HAVE_LOCALTIME_R + { + struct tm sLocal; + localtime_r(&t, &sLocal); + y.Y = sLocal.tm_year + 1900; + y.M = sLocal.tm_mon + 1; + y.D = sLocal.tm_mday; + y.h = sLocal.tm_hour; + y.m = sLocal.tm_min; + y.s = sLocal.tm_sec; + } +#else + { + struct tm *pTm; + sqlite3_mutex_enter(sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER)); + pTm = localtime(&t); + y.Y = pTm->tm_year + 1900; + y.M = pTm->tm_mon + 1; + y.D = pTm->tm_mday; + y.h = pTm->tm_hour; + y.m = pTm->tm_min; + y.s = pTm->tm_sec; + sqlite3_mutex_leave(sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER)); + } +#endif + y.validYMD = 1; + y.validHMS = 1; + y.validJD = 0; + y.validTZ = 0; + computeJD(&y); + return y.rJD - x.rJD; +} + +/* +** Process a modifier to a date-time stamp. The modifiers are +** as follows: +** +** NNN days +** NNN hours +** NNN minutes +** NNN.NNNN seconds +** NNN months +** NNN years +** start of month +** start of year +** start of week +** start of day +** weekday N +** unixepoch +** localtime +** utc +** +** Return 0 on success and 1 if there is any kind of error. 
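At the SQL level these modifiers compose left to right, which is how idioms such as "last day of the current month" are usually written. A small sketch using only the public C API; the in-memory database, the callback name printRow, and the particular queries are arbitrary choices.

#include <stdio.h>
#include <sqlite3.h>

/* Print the first text column of each result row. */
static int printRow(void *pArg, int nCol, char **azVal, char **azCol){
  (void)pArg; (void)azCol;
  if( nCol>0 && azVal[0] ) printf("%s\n", azVal[0]);
  return 0;
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;

  /* 'start of month' rewinds to 00:00:00 on the 1st; '+1 month' then
  ** '-1 day' lands on the last day of the current month. */
  sqlite3_exec(db,
     "SELECT datetime('now','start of month','+1 month','-1 day');",
     printRow, 0, 0);

  /* 'unixepoch' interprets a numeric value as seconds since 1970,
  ** so this prints 1970-01-01 00:00:00. */
  sqlite3_exec(db, "SELECT datetime(0, 'unixepoch');", printRow, 0, 0);

  sqlite3_close(db);
  return 0;
}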
+*/ +static int parseModifier(const char *zMod, DateTime *p){ + int rc = 1; + int n; + double r; + char *z, zBuf[30]; + z = zBuf; + for(n=0; nrJD += localtimeOffset(p); + clearYMD_HMS_TZ(p); + rc = 0; + } + break; + } + case 'u': { + /* + ** unixepoch + ** + ** Treat the current value of p->rJD as the number of + ** seconds since 1970. Convert to a real julian day number. + */ + if( strcmp(z, "unixepoch")==0 && p->validJD ){ + p->rJD = p->rJD/86400.0 + 2440587.5; + clearYMD_HMS_TZ(p); + rc = 0; + }else if( strcmp(z, "utc")==0 ){ + double c1; + computeJD(p); + c1 = localtimeOffset(p); + p->rJD -= c1; + clearYMD_HMS_TZ(p); + p->rJD += c1 - localtimeOffset(p); + rc = 0; + } + break; + } + case 'w': { + /* + ** weekday N + ** + ** Move the date to the same time on the next occurrence of + ** weekday N where 0==Sunday, 1==Monday, and so forth. If the + ** date is already on the appropriate weekday, this is a no-op. + */ + if( strncmp(z, "weekday ", 8)==0 && getValue(&z[8],&r)>0 + && (n=r)==r && n>=0 && r<7 ){ + int Z; + computeYMD_HMS(p); + p->validTZ = 0; + p->validJD = 0; + computeJD(p); + Z = p->rJD + 1.5; + Z %= 7; + if( Z>n ) Z -= 7; + p->rJD += n - Z; + clearYMD_HMS_TZ(p); + rc = 0; + } + break; + } + case 's': { + /* + ** start of TTTTT + ** + ** Move the date backwards to the beginning of the current day, + ** or month or year. + */ + if( strncmp(z, "start of ", 9)!=0 ) break; + z += 9; + computeYMD(p); + p->validHMS = 1; + p->h = p->m = 0; + p->s = 0.0; + p->validTZ = 0; + p->validJD = 0; + if( strcmp(z,"month")==0 ){ + p->D = 1; + rc = 0; + }else if( strcmp(z,"year")==0 ){ + computeYMD(p); + p->M = 1; + p->D = 1; + rc = 0; + }else if( strcmp(z,"day")==0 ){ + rc = 0; + } + break; + } + case '+': + case '-': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': { + n = getValue(z, &r); + assert( n>=1 ); + if( z[n]==':' ){ + /* A modifier of the form (+|-)HH:MM:SS.FFF adds (or subtracts) the + ** specified number of hours, minutes, seconds, and fractional seconds + ** to the time. The ".FFF" may be omitted. The ":SS.FFF" may be + ** omitted. + */ + const char *z2 = z; + DateTime tx; + int day; + if( !isdigit(*(u8*)z2) ) z2++; + memset(&tx, 0, sizeof(tx)); + if( parseHhMmSs(z2, &tx) ) break; + computeJD(&tx); + tx.rJD -= 0.5; + day = (int)tx.rJD; + tx.rJD -= day; + if( z[0]=='-' ) tx.rJD = -tx.rJD; + computeJD(p); + clearYMD_HMS_TZ(p); + p->rJD += tx.rJD; + rc = 0; + break; + } + z += n; + while( isspace(*(u8*)z) ) z++; + n = strlen(z); + if( n>10 || n<3 ) break; + if( z[n-1]=='s' ){ z[n-1] = 0; n--; } + computeJD(p); + rc = 0; + if( n==3 && strcmp(z,"day")==0 ){ + p->rJD += r; + }else if( n==4 && strcmp(z,"hour")==0 ){ + p->rJD += r/24.0; + }else if( n==6 && strcmp(z,"minute")==0 ){ + p->rJD += r/(24.0*60.0); + }else if( n==6 && strcmp(z,"second")==0 ){ + p->rJD += r/(24.0*60.0*60.0); + }else if( n==5 && strcmp(z,"month")==0 ){ + int x, y; + computeYMD_HMS(p); + p->M += r; + x = p->M>0 ? (p->M-1)/12 : (p->M-12)/12; + p->Y += x; + p->M -= x*12; + p->validJD = 0; + computeJD(p); + y = r; + if( y!=r ){ + p->rJD += (r - y)*30.0; + } + }else if( n==4 && strcmp(z,"year")==0 ){ + computeYMD_HMS(p); + p->Y += r; + p->validJD = 0; + computeJD(p); + }else{ + rc = 1; + } + clearYMD_HMS_TZ(p); + break; + } + default: { + break; + } + } + return rc; +} + +/* +** Process time function arguments. argv[0] is a date-time stamp. +** argv[1] and following are modifiers. 
Parse them all and write +** the resulting time into the DateTime structure p. Return 0 +** on success and 1 if there are any errors. +*/ +static int isDate( + sqlite3_context *context, + int argc, + sqlite3_value **argv, + DateTime *p +){ + int i; + const unsigned char *z; + if( argc==0 ) return 1; + z = sqlite3_value_text(argv[0]); + if( !z || parseDateOrTime(context, (char*)z, p) ){ + return 1; + } + for(i=1; iSQLITE_MAX_LENGTH ){ + sqlite3_result_error_toobig(context); + return; + }else{ + z = sqlite3_malloc( n ); + if( z==0 ) return; + } + computeJD(&x); + computeYMD_HMS(&x); + for(i=j=0; zFmt[i]; i++){ + if( zFmt[i]!='%' ){ + z[j++] = zFmt[i]; + }else{ + i++; + switch( zFmt[i] ){ + case 'd': sqlite3_snprintf(3, &z[j],"%02d",x.D); j+=2; break; + case 'f': { + double s = x.s; + if( s>59.999 ) s = 59.999; + sqlite3_snprintf(7, &z[j],"%06.3f", s); + j += strlen(&z[j]); + break; + } + case 'H': sqlite3_snprintf(3, &z[j],"%02d",x.h); j+=2; break; + case 'W': /* Fall thru */ + case 'j': { + int nDay; /* Number of days since 1st day of year */ + DateTime y = x; + y.validJD = 0; + y.M = 1; + y.D = 1; + computeJD(&y); + nDay = x.rJD - y.rJD + 0.5; + if( zFmt[i]=='W' ){ + int wd; /* 0=Monday, 1=Tuesday, ... 6=Sunday */ + wd = ((int)(x.rJD+0.5)) % 7; + sqlite3_snprintf(3, &z[j],"%02d",(nDay+7-wd)/7); + j += 2; + }else{ + sqlite3_snprintf(4, &z[j],"%03d",nDay+1); + j += 3; + } + break; + } + case 'J': { + sqlite3_snprintf(20, &z[j],"%.16g",x.rJD); + j+=strlen(&z[j]); + break; + } + case 'm': sqlite3_snprintf(3, &z[j],"%02d",x.M); j+=2; break; + case 'M': sqlite3_snprintf(3, &z[j],"%02d",x.m); j+=2; break; + case 's': { + sqlite3_snprintf(30,&z[j],"%d", + (int)((x.rJD-2440587.5)*86400.0 + 0.5)); + j += strlen(&z[j]); + break; + } + case 'S': sqlite3_snprintf(3,&z[j],"%02d",(int)x.s); j+=2; break; + case 'w': z[j++] = (((int)(x.rJD+1.5)) % 7) + '0'; break; + case 'Y': sqlite3_snprintf(5,&z[j],"%04d",x.Y); j+=strlen(&z[j]);break; + case '%': z[j++] = '%'; break; + } + } + } + z[j] = 0; + sqlite3_result_text(context, z, -1, SQLITE_TRANSIENT); + if( z!=zBuf ){ + sqlite3_free(z); + } +} + +/* +** current_time() +** +** This function returns the same value as time('now'). +*/ +static void ctimeFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + sqlite3_value *pVal = sqlite3ValueNew(0); + if( pVal ){ + sqlite3ValueSetStr(pVal, -1, "now", SQLITE_UTF8, SQLITE_STATIC); + timeFunc(context, 1, &pVal); + sqlite3ValueFree(pVal); + } +} + +/* +** current_date() +** +** This function returns the same value as date('now'). +*/ +static void cdateFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + sqlite3_value *pVal = sqlite3ValueNew(0); + if( pVal ){ + sqlite3ValueSetStr(pVal, -1, "now", SQLITE_UTF8, SQLITE_STATIC); + dateFunc(context, 1, &pVal); + sqlite3ValueFree(pVal); + } +} + +/* +** current_timestamp() +** +** This function returns the same value as datetime('now'). 
+*/ +static void ctimestampFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + sqlite3_value *pVal = sqlite3ValueNew(0); + if( pVal ){ + sqlite3ValueSetStr(pVal, -1, "now", SQLITE_UTF8, SQLITE_STATIC); + datetimeFunc(context, 1, &pVal); + sqlite3ValueFree(pVal); + } +} +#endif /* !defined(SQLITE_OMIT_DATETIME_FUNCS) */ + +#ifdef SQLITE_OMIT_DATETIME_FUNCS +/* +** If the library is compiled to omit the full-scale date and time +** handling (to get a smaller binary), the following minimal version +** of the functions current_time(), current_date() and current_timestamp() +** are included instead. This is to support column declarations that +** include "DEFAULT CURRENT_TIME" etc. +** +** This function uses the C-library functions time(), gmtime() +** and strftime(). The format string to pass to strftime() is supplied +** as the user-data for the function. +*/ +static void currentTimeFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + time_t t; + char *zFormat = (char *)sqlite3_user_data(context); + char zBuf[20]; + + time(&t); +#ifdef SQLITE_TEST + { + extern int sqlite3_current_time; /* See os_XXX.c */ + if( sqlite3_current_time ){ + t = sqlite3_current_time; + } + } +#endif + +#ifdef HAVE_GMTIME_R + { + struct tm sNow; + gmtime_r(&t, &sNow); + strftime(zBuf, 20, zFormat, &sNow); + } +#else + { + struct tm *pTm; + sqlite3_mutex_enter(sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER)); + pTm = gmtime(&t); + strftime(zBuf, 20, zFormat, pTm); + sqlite3_mutex_leave(sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER)); + } +#endif + + sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT); +} +#endif + +/* +** This function registered all of the above C functions as SQL +** functions. This should be the only routine in this file with +** external linkage. +*/ +void sqlite3RegisterDateTimeFunctions(sqlite3 *db){ +#ifndef SQLITE_OMIT_DATETIME_FUNCS + static const struct { + char *zName; + int nArg; + void (*xFunc)(sqlite3_context*,int,sqlite3_value**); + } aFuncs[] = { + { "julianday", -1, juliandayFunc }, + { "date", -1, dateFunc }, + { "time", -1, timeFunc }, + { "datetime", -1, datetimeFunc }, + { "strftime", -1, strftimeFunc }, + { "current_time", 0, ctimeFunc }, + { "current_timestamp", 0, ctimestampFunc }, + { "current_date", 0, cdateFunc }, + }; + int i; + + for(i=0; ipVfs), aFuncs[i].xFunc, 0, 0); + } +#else + static const struct { + char *zName; + char *zFormat; + } aFuncs[] = { + { "current_time", "%H:%M:%S" }, + { "current_date", "%Y-%m-%d" }, + { "current_timestamp", "%Y-%m-%d %H:%M:%S" } + }; + int i; + + for(i=0; izErrMsg and return NULL. If all tables +** are found, return a pointer to the last table. +*/ +Table *sqlite3SrcListLookup(Parse *pParse, SrcList *pSrc){ + Table *pTab = 0; + int i; + struct SrcList_item *pItem; + for(i=0, pItem=pSrc->a; inSrc; i++, pItem++){ + pTab = sqlite3LocateTable(pParse, pItem->zName, pItem->zDatabase); + sqlite3DeleteTable(pItem->pTab); + pItem->pTab = pTab; + if( pTab ){ + pTab->nRef++; + } + } + return pTab; +} + +/* +** Check to make sure the given table is writable. If it is not +** writable, generate an error message and return 1. 
If it is +** writable return 0; +*/ +int sqlite3IsReadOnly(Parse *pParse, Table *pTab, int viewOk){ + if( (pTab->readOnly && (pParse->db->flags & SQLITE_WriteSchema)==0 + && pParse->nested==0) +#ifndef SQLITE_OMIT_VIRTUALTABLE + || (pTab->pMod && pTab->pMod->pModule->xUpdate==0) +#endif + ){ + sqlite3ErrorMsg(pParse, "table %s may not be modified", pTab->zName); + return 1; + } +#ifndef SQLITE_OMIT_VIEW + if( !viewOk && pTab->pSelect ){ + sqlite3ErrorMsg(pParse,"cannot modify %s because it is a view",pTab->zName); + return 1; + } +#endif + return 0; +} + +/* +** Generate code that will open a table for reading. +*/ +void sqlite3OpenTable( + Parse *p, /* Generate code into this VDBE */ + int iCur, /* The cursor number of the table */ + int iDb, /* The database index in sqlite3.aDb[] */ + Table *pTab, /* The table to be opened */ + int opcode /* OP_OpenRead or OP_OpenWrite */ +){ + Vdbe *v; + if( IsVirtual(pTab) ) return; + v = sqlite3GetVdbe(p); + assert( opcode==OP_OpenWrite || opcode==OP_OpenRead ); + sqlite3TableLock(p, iDb, pTab->tnum, (opcode==OP_OpenWrite), pTab->zName); + sqlite3VdbeAddOp(v, OP_Integer, iDb, 0); + VdbeComment((v, "# %s", pTab->zName)); + sqlite3VdbeAddOp(v, opcode, iCur, pTab->tnum); + sqlite3VdbeAddOp(v, OP_SetNumColumns, iCur, pTab->nCol); +} + + +/* +** Generate code for a DELETE FROM statement. +** +** DELETE FROM table_wxyz WHERE a<5 AND b NOT NULL; +** \________/ \________________/ +** pTabList pWhere +*/ +void sqlite3DeleteFrom( + Parse *pParse, /* The parser context */ + SrcList *pTabList, /* The table from which we should delete things */ + Expr *pWhere /* The WHERE clause. May be null */ +){ + Vdbe *v; /* The virtual database engine */ + Table *pTab; /* The table from which records will be deleted */ + const char *zDb; /* Name of database holding pTab */ + int end, addr = 0; /* A couple addresses of generated code */ + int i; /* Loop counter */ + WhereInfo *pWInfo; /* Information about the WHERE clause */ + Index *pIdx; /* For looping over indices of the table */ + int iCur; /* VDBE Cursor number for pTab */ + sqlite3 *db; /* Main database structure */ + AuthContext sContext; /* Authorization context */ + int oldIdx = -1; /* Cursor for the OLD table of AFTER triggers */ + NameContext sNC; /* Name context to resolve expressions in */ + int iDb; /* Database number */ + int memCnt = 0; /* Memory cell used for change counting */ + +#ifndef SQLITE_OMIT_TRIGGER + int isView; /* True if attempting to delete from a view */ + int triggers_exist = 0; /* True if any triggers exist */ +#endif + + sContext.pParse = 0; + db = pParse->db; + if( pParse->nErr || db->mallocFailed ){ + goto delete_from_cleanup; + } + assert( pTabList->nSrc==1 ); + + /* Locate the table which we want to delete. This table has to be + ** put in an SrcList structure because some of the subroutines we + ** will be calling are designed to work with multiple tables and expect + ** an SrcList* parameter instead of just a Table* parameter. 
+ */ + pTab = sqlite3SrcListLookup(pParse, pTabList); + if( pTab==0 ) goto delete_from_cleanup; + + /* Figure out if we have any triggers and if the table being + ** deleted from is a view + */ +#ifndef SQLITE_OMIT_TRIGGER + triggers_exist = sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0); + isView = pTab->pSelect!=0; +#else +# define triggers_exist 0 +# define isView 0 +#endif +#ifdef SQLITE_OMIT_VIEW +# undef isView +# define isView 0 +#endif + + if( sqlite3IsReadOnly(pParse, pTab, triggers_exist) ){ + goto delete_from_cleanup; + } + iDb = sqlite3SchemaToIndex(db, pTab->pSchema); + assert( iDbnDb ); + zDb = db->aDb[iDb].zName; + if( sqlite3AuthCheck(pParse, SQLITE_DELETE, pTab->zName, 0, zDb) ){ + goto delete_from_cleanup; + } + + /* If pTab is really a view, make sure it has been initialized. + */ + if( sqlite3ViewGetColumnNames(pParse, pTab) ){ + goto delete_from_cleanup; + } + + /* Allocate a cursor used to store the old.* data for a trigger. + */ + if( triggers_exist ){ + oldIdx = pParse->nTab++; + } + + /* Resolve the column names in the WHERE clause. + */ + assert( pTabList->nSrc==1 ); + iCur = pTabList->a[0].iCursor = pParse->nTab++; + memset(&sNC, 0, sizeof(sNC)); + sNC.pParse = pParse; + sNC.pSrcList = pTabList; + if( sqlite3ExprResolveNames(&sNC, pWhere) ){ + goto delete_from_cleanup; + } + + /* Start the view context + */ + if( isView ){ + sqlite3AuthContextPush(pParse, &sContext, pTab->zName); + } + + /* Begin generating code. + */ + v = sqlite3GetVdbe(pParse); + if( v==0 ){ + goto delete_from_cleanup; + } + if( pParse->nested==0 ) sqlite3VdbeCountChanges(v); + sqlite3BeginWriteOperation(pParse, triggers_exist, iDb); + + /* If we are trying to delete from a view, realize that view into + ** a ephemeral table. + */ + if( isView ){ + Select *pView = sqlite3SelectDup(db, pTab->pSelect); + sqlite3Select(pParse, pView, SRT_EphemTab, iCur, 0, 0, 0, 0); + sqlite3SelectDelete(pView); + } + + /* Initialize the counter of the number of rows deleted, if + ** we are counting rows. + */ + if( db->flags & SQLITE_CountRows ){ + memCnt = pParse->nMem++; + sqlite3VdbeAddOp(v, OP_MemInt, 0, memCnt); + } + + /* Special case: A DELETE without a WHERE clause deletes everything. + ** It is easier just to erase the whole table. Note, however, that + ** this means that the row change count will be incorrect. + */ + if( pWhere==0 && !triggers_exist && !IsVirtual(pTab) ){ + if( db->flags & SQLITE_CountRows ){ + /* If counting rows deleted, just count the total number of + ** entries in the table. */ + int endOfLoop = sqlite3VdbeMakeLabel(v); + int addr2; + if( !isView ){ + sqlite3OpenTable(pParse, iCur, iDb, pTab, OP_OpenRead); + } + sqlite3VdbeAddOp(v, OP_Rewind, iCur, sqlite3VdbeCurrentAddr(v)+2); + addr2 = sqlite3VdbeAddOp(v, OP_MemIncr, 1, memCnt); + sqlite3VdbeAddOp(v, OP_Next, iCur, addr2); + sqlite3VdbeResolveLabel(v, endOfLoop); + sqlite3VdbeAddOp(v, OP_Close, iCur, 0); + } + if( !isView ){ + sqlite3VdbeAddOp(v, OP_Clear, pTab->tnum, iDb); + if( !pParse->nested ){ + sqlite3VdbeChangeP3(v, -1, pTab->zName, P3_STATIC); + } + for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + assert( pIdx->pSchema==pTab->pSchema ); + sqlite3VdbeAddOp(v, OP_Clear, pIdx->tnum, iDb); + } + } + } + /* The usual case: There is a WHERE clause so we have to scan through + ** the table and pick which records to delete. + */ + else{ + /* Begin the database scan + */ + pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0); + if( pWInfo==0 ) goto delete_from_cleanup; + + /* Remember the rowid of every item to be deleted. 
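The easiest way to inspect the VDBE program that sqlite3DeleteFrom() emits for a concrete statement is to prepare it under EXPLAIN. A sketch using only the public API; the table, the WHERE clause, and the output formatting are arbitrary, and since the number of EXPLAIN result columns varies between SQLite versions the loop simply prints whatever columns are present.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  int i, rc;

  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "CREATE TABLE t1(a,b); INSERT INTO t1 VALUES(1,2);", 0, 0, 0);

  /* Prepare under EXPLAIN so the VDBE program is returned as rows
  ** instead of being executed. */
  rc = sqlite3_prepare_v2(db, "EXPLAIN DELETE FROM t1 WHERE a<5;", -1, &pStmt, 0);
  if( rc==SQLITE_OK ){
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      for(i=0; i<sqlite3_column_count(pStmt); i++){
        const unsigned char *zVal = sqlite3_column_text(pStmt, i);
        printf("%s%s", i ? " | " : "", zVal ? (const char*)zVal : "NULL");
      }
      printf("\n");
    }
    sqlite3_finalize(pStmt);
  }
  sqlite3_close(db);
  return 0;
}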
+ */ + sqlite3VdbeAddOp(v, IsVirtual(pTab) ? OP_VRowid : OP_Rowid, iCur, 0); + sqlite3VdbeAddOp(v, OP_FifoWrite, 0, 0); + if( db->flags & SQLITE_CountRows ){ + sqlite3VdbeAddOp(v, OP_MemIncr, 1, memCnt); + } + + /* End the database scan loop. + */ + sqlite3WhereEnd(pWInfo); + + /* Open the pseudo-table used to store OLD if there are triggers. + */ + if( triggers_exist ){ + sqlite3VdbeAddOp(v, OP_OpenPseudo, oldIdx, 0); + sqlite3VdbeAddOp(v, OP_SetNumColumns, oldIdx, pTab->nCol); + } + + /* Delete every item whose key was written to the list during the + ** database scan. We have to delete items after the scan is complete + ** because deleting an item can change the scan order. + */ + end = sqlite3VdbeMakeLabel(v); + + /* This is the beginning of the delete loop when there are + ** row triggers. + */ + if( triggers_exist ){ + addr = sqlite3VdbeAddOp(v, OP_FifoRead, 0, end); + if( !isView ){ + sqlite3VdbeAddOp(v, OP_Dup, 0, 0); + sqlite3OpenTable(pParse, iCur, iDb, pTab, OP_OpenRead); + } + sqlite3VdbeAddOp(v, OP_MoveGe, iCur, 0); + sqlite3VdbeAddOp(v, OP_Rowid, iCur, 0); + sqlite3VdbeAddOp(v, OP_RowData, iCur, 0); + sqlite3VdbeAddOp(v, OP_Insert, oldIdx, 0); + if( !isView ){ + sqlite3VdbeAddOp(v, OP_Close, iCur, 0); + } + + (void)sqlite3CodeRowTrigger(pParse, TK_DELETE, 0, TRIGGER_BEFORE, pTab, + -1, oldIdx, (pParse->trigStack)?pParse->trigStack->orconf:OE_Default, + addr); + } + + if( !isView ){ + /* Open cursors for the table we are deleting from and all its + ** indices. If there are row triggers, this happens inside the + ** OP_FifoRead loop because the cursor have to all be closed + ** before the trigger fires. If there are no row triggers, the + ** cursors are opened only once on the outside the loop. + */ + sqlite3OpenTableAndIndices(pParse, pTab, iCur, OP_OpenWrite); + + /* This is the beginning of the delete loop when there are no + ** row triggers */ + if( !triggers_exist ){ + addr = sqlite3VdbeAddOp(v, OP_FifoRead, 0, end); + } + + /* Delete the row */ +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( IsVirtual(pTab) ){ + pParse->pVirtualLock = pTab; + sqlite3VdbeOp3(v, OP_VUpdate, 0, 1, (const char*)pTab->pVtab, P3_VTAB); + }else +#endif + { + sqlite3GenerateRowDelete(db, v, pTab, iCur, pParse->nested==0); + } + } + + /* If there are row triggers, close all cursors then invoke + ** the AFTER triggers + */ + if( triggers_exist ){ + if( !isView ){ + for(i=1, pIdx=pTab->pIndex; pIdx; i++, pIdx=pIdx->pNext){ + sqlite3VdbeAddOp(v, OP_Close, iCur + i, pIdx->tnum); + } + sqlite3VdbeAddOp(v, OP_Close, iCur, 0); + } + (void)sqlite3CodeRowTrigger(pParse, TK_DELETE, 0, TRIGGER_AFTER, pTab, -1, + oldIdx, (pParse->trigStack)?pParse->trigStack->orconf:OE_Default, + addr); + } + + /* End of the delete loop */ + sqlite3VdbeAddOp(v, OP_Goto, 0, addr); + sqlite3VdbeResolveLabel(v, end); + + /* Close the cursors after the loop if there are no row triggers */ + if( !triggers_exist && !IsVirtual(pTab) ){ + for(i=1, pIdx=pTab->pIndex; pIdx; i++, pIdx=pIdx->pNext){ + sqlite3VdbeAddOp(v, OP_Close, iCur + i, pIdx->tnum); + } + sqlite3VdbeAddOp(v, OP_Close, iCur, 0); + } + } + + /* + ** Return the number of rows that were deleted. If this routine is + ** generating code because of a call to sqlite3NestedParse(), do not + ** invoke the callback function. 
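From the application side, the usual way to learn how many rows a DELETE removed is sqlite3_changes(). A brief sketch; as the special case earlier in this routine notes, a DELETE with no WHERE clause may be handled with OP_Clear and then the reported count can be inaccurate in this version, so the example keeps a WHERE clause.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db,
     "CREATE TABLE t1(a); INSERT INTO t1 VALUES(1);"
     "INSERT INTO t1 VALUES(2); INSERT INTO t1 VALUES(7);",
     0, 0, 0);

  /* The WHERE-qualified scan counts each deleted row, so this reports 2. */
  sqlite3_exec(db, "DELETE FROM t1 WHERE a<5;", 0, 0, 0);
  printf("rows deleted: %d\n", sqlite3_changes(db));

  sqlite3_close(db);
  return 0;
}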
+ */ + if( db->flags & SQLITE_CountRows && pParse->nested==0 && !pParse->trigStack ){ + sqlite3VdbeAddOp(v, OP_MemLoad, memCnt, 0); + sqlite3VdbeAddOp(v, OP_Callback, 1, 0); + sqlite3VdbeSetNumCols(v, 1); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows deleted", P3_STATIC); + } + +delete_from_cleanup: + sqlite3AuthContextPop(&sContext); + sqlite3SrcListDelete(pTabList); + sqlite3ExprDelete(pWhere); + return; +} + +/* +** This routine generates VDBE code that causes a single row of a +** single table to be deleted. +** +** The VDBE must be in a particular state when this routine is called. +** These are the requirements: +** +** 1. A read/write cursor pointing to pTab, the table containing the row +** to be deleted, must be opened as cursor number "base". +** +** 2. Read/write cursors for all indices of pTab must be open as +** cursor number base+i for the i-th index. +** +** 3. The record number of the row to be deleted must be on the top +** of the stack. +** +** This routine pops the top of the stack to remove the record number +** and then generates code to remove both the table record and all index +** entries that point to that record. +*/ +void sqlite3GenerateRowDelete( + sqlite3 *db, /* The database containing the index */ + Vdbe *v, /* Generate code into this VDBE */ + Table *pTab, /* Table containing the row to be deleted */ + int iCur, /* Cursor number for the table */ + int count /* Increment the row change counter */ +){ + int addr; + addr = sqlite3VdbeAddOp(v, OP_NotExists, iCur, 0); + sqlite3GenerateRowIndexDelete(v, pTab, iCur, 0); + sqlite3VdbeAddOp(v, OP_Delete, iCur, (count?OPFLAG_NCHANGE:0)); + if( count ){ + sqlite3VdbeChangeP3(v, -1, pTab->zName, P3_STATIC); + } + sqlite3VdbeJumpHere(v, addr); +} + +/* +** This routine generates VDBE code that causes the deletion of all +** index entries associated with a single row of a single table. +** +** The VDBE must be in a particular state when this routine is called. +** These are the requirements: +** +** 1. A read/write cursor pointing to pTab, the table containing the row +** to be deleted, must be opened as cursor number "iCur". +** +** 2. Read/write cursors for all indices of pTab must be open as +** cursor number iCur+i for the i-th index. +** +** 3. The "iCur" cursor must be pointing to the row that is to be +** deleted. +*/ +void sqlite3GenerateRowIndexDelete( + Vdbe *v, /* Generate code into this VDBE */ + Table *pTab, /* Table containing the row to be deleted */ + int iCur, /* Cursor number for the table */ + char *aIdxUsed /* Only delete if aIdxUsed!=0 && aIdxUsed[i]!=0 */ +){ + int i; + Index *pIdx; + + for(i=1, pIdx=pTab->pIndex; pIdx; i++, pIdx=pIdx->pNext){ + if( aIdxUsed!=0 && aIdxUsed[i-1]==0 ) continue; + sqlite3GenerateIndexKey(v, pIdx, iCur); + sqlite3VdbeAddOp(v, OP_IdxDelete, iCur+i, 0); + } +} + +/* +** Generate code that will assemble an index key and put it on the top +** of the tack. The key with be for index pIdx which is an index on pTab. +** iCur is the index of a cursor open on the pTab table and pointing to +** the entry that needs indexing. 
+*/ +void sqlite3GenerateIndexKey( + Vdbe *v, /* Generate code into this VDBE */ + Index *pIdx, /* The index for which to generate a key */ + int iCur /* Cursor number for the pIdx->pTable table */ +){ + int j; + Table *pTab = pIdx->pTable; + + sqlite3VdbeAddOp(v, OP_Rowid, iCur, 0); + for(j=0; jnColumn; j++){ + int idx = pIdx->aiColumn[j]; + if( idx==pTab->iPKey ){ + sqlite3VdbeAddOp(v, OP_Dup, j, 0); + }else{ + sqlite3VdbeAddOp(v, OP_Column, iCur, idx); + sqlite3ColumnDefault(v, pTab, idx); + } + } + sqlite3VdbeAddOp(v, OP_MakeIdxRec, pIdx->nColumn, 0); + sqlite3IndexAffinityStr(v, pIdx); +} diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/dump.txt b/libraries/sqlite/unix/sqlite-3.5.1/src/dump.txt new file mode 100644 index 0000000000..55560ef057 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/dump.txt @@ -0,0 +1,2267 @@ +1.1 (drh 06-Sep-03): /* +1.1 (drh 06-Sep-03): ** 2003 September 6 +1.1 (drh 06-Sep-03): ** +1.1 (drh 06-Sep-03): ** The author disclaims copyright to this source code. In place of +1.1 (drh 06-Sep-03): ** a legal notice, here is a blessing: +1.1 (drh 06-Sep-03): ** +1.1 (drh 06-Sep-03): ** May you do good and not evil. +1.1 (drh 06-Sep-03): ** May you find forgiveness for yourself and forgive others. +1.1 (drh 06-Sep-03): ** May you share freely, never taking more than you give. +1.1 (drh 06-Sep-03): ** +1.1 (drh 06-Sep-03): ************************************************************************* +1.1 (drh 06-Sep-03): ** This file contains code used for creating, destroying, and populating +1.65 (danielk1 26-May-04): ** a VDBE (or an "sqlite3_stmt" as it is known to the outside world.) Prior +1.1 (drh 06-Sep-03): ** to version 2.8.7, all this code was combined into the vdbe.c source file. +1.1 (drh 06-Sep-03): ** But that file was getting too big so this subroutines were split out. +1.1 (drh 06-Sep-03): */ +1.1 (drh 06-Sep-03): #include "sqliteInt.h" +1.1 (drh 06-Sep-03): #include +1.1 (drh 06-Sep-03): #include "vdbeInt.h" +1.1 (drh 06-Sep-03): +1.1 (drh 06-Sep-03): +1.311 (drh 27-Aug-07): +1.1 (drh 06-Sep-03): /* +1.1 (drh 06-Sep-03): ** When debugging the code generator in a symbolic debugger, one can +1.24 (danielk1 10-May-04): ** set the sqlite3_vdbe_addop_trace to 1 and all opcodes will be printed +1.1 (drh 06-Sep-03): ** as they are added to the instruction stream. +1.1 (drh 06-Sep-03): */ +1.182 (drh 14-Jun-05): #ifdef SQLITE_DEBUG +1.24 (danielk1 10-May-04): int sqlite3_vdbe_addop_trace = 0; +1.1 (drh 06-Sep-03): #endif +1.1 (drh 06-Sep-03): +1.1 (drh 06-Sep-03): +1.1 (drh 06-Sep-03): /* +1.1 (drh 06-Sep-03): ** Create a new virtual database engine. +1.1 (drh 06-Sep-03): */ +1.140 (drh 06-Sep-04): Vdbe *sqlite3VdbeCreate(sqlite3 *db){ +1.1 (drh 06-Sep-03): Vdbe *p; +1.299 (drh 16-Aug-07): p = sqlite3DbMallocZero(db, sizeof(Vdbe) ); +1.1 (drh 06-Sep-03): if( p==0 ) return 0; +1.1 (drh 06-Sep-03): p->db = db; +1.1 (drh 06-Sep-03): if( db->pVdbe ){ +1.1 (drh 06-Sep-03): db->pVdbe->pPrev = p; +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): p->pNext = db->pVdbe; +1.1 (drh 06-Sep-03): p->pPrev = 0; +1.1 (drh 06-Sep-03): db->pVdbe = p; +1.1 (drh 06-Sep-03): p->magic = VDBE_MAGIC_INIT; +1.1 (drh 06-Sep-03): return p; +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): +1.1 (drh 06-Sep-03): /* +1.268 (drh 09-Nov-06): ** Remember the SQL string for a prepared statement. 
+1.268 (drh 09-Nov-06): */ +1.268 (drh 09-Nov-06): void sqlite3VdbeSetSql(Vdbe *p, const char *z, int n){ +1.268 (drh 09-Nov-06): if( p==0 ) return; +1.268 (drh 09-Nov-06): assert( p->zSql==0 ); +1.299 (drh 16-Aug-07): p->zSql = sqlite3DbStrNDup(p->db, z, n); +1.268 (drh 09-Nov-06): } +1.268 (drh 09-Nov-06): +1.268 (drh 09-Nov-06): /* +1.268 (drh 09-Nov-06): ** Return the SQL associated with a prepared statement +1.268 (drh 09-Nov-06): */ +1.268 (drh 09-Nov-06): const char *sqlite3VdbeGetSql(Vdbe *p){ +1.268 (drh 09-Nov-06): return p->zSql; +1.268 (drh 09-Nov-06): } +1.268 (drh 09-Nov-06): +1.268 (drh 09-Nov-06): /* +1.269 (drh 08-Jan-07): ** Swap all content between two VDBE structures. +1.268 (drh 09-Nov-06): */ +1.269 (drh 08-Jan-07): void sqlite3VdbeSwap(Vdbe *pA, Vdbe *pB){ +1.269 (drh 08-Jan-07): Vdbe tmp, *pTmp; +1.269 (drh 08-Jan-07): char *zTmp; +1.269 (drh 08-Jan-07): int nTmp; +1.269 (drh 08-Jan-07): tmp = *pA; +1.269 (drh 08-Jan-07): *pA = *pB; +1.269 (drh 08-Jan-07): *pB = tmp; +1.269 (drh 08-Jan-07): pTmp = pA->pNext; +1.269 (drh 08-Jan-07): pA->pNext = pB->pNext; +1.269 (drh 08-Jan-07): pB->pNext = pTmp; +1.269 (drh 08-Jan-07): pTmp = pA->pPrev; +1.269 (drh 08-Jan-07): pA->pPrev = pB->pPrev; +1.269 (drh 08-Jan-07): pB->pPrev = pTmp; +1.269 (drh 08-Jan-07): zTmp = pA->zSql; +1.269 (drh 08-Jan-07): pA->zSql = pB->zSql; +1.269 (drh 08-Jan-07): pB->zSql = zTmp; +1.269 (drh 08-Jan-07): nTmp = pA->nSql; +1.269 (drh 08-Jan-07): pA->nSql = pB->nSql; +1.269 (drh 08-Jan-07): pB->nSql = nTmp; +1.268 (drh 09-Nov-06): } +1.268 (drh 09-Nov-06): +1.287 (drh 08-May-07): #ifdef SQLITE_DEBUG +1.268 (drh 09-Nov-06): /* +1.1 (drh 06-Sep-03): ** Turn tracing on or off +1.1 (drh 06-Sep-03): */ +1.19 (danielk1 08-May-04): void sqlite3VdbeTrace(Vdbe *p, FILE *trace){ +1.1 (drh 06-Sep-03): p->trace = trace; +1.1 (drh 06-Sep-03): } +1.287 (drh 08-May-07): #endif +1.1 (drh 06-Sep-03): +1.1 (drh 06-Sep-03): /* +1.146 (drh 24-Sep-04): ** Resize the Vdbe.aOp array so that it contains at least N +1.170 (danielk1 28-Mar-05): ** elements. If the Vdbe is in VDBE_MAGIC_RUN state, then +1.235 (danielk1 26-Jan-06): ** the Vdbe.aOp array will be sized to contain exactly N +1.235 (danielk1 26-Jan-06): ** elements. Vdbe.nOpAlloc is set to reflect the new size of +1.235 (danielk1 26-Jan-06): ** the array. +1.235 (danielk1 26-Jan-06): ** +1.235 (danielk1 26-Jan-06): ** If an out-of-memory error occurs while resizing the array, +1.235 (danielk1 26-Jan-06): ** Vdbe.aOp and Vdbe.nOpAlloc remain unchanged (this is so that +1.235 (danielk1 26-Jan-06): ** any opcodes already allocated can be correctly deallocated +1.235 (danielk1 26-Jan-06): ** along with the rest of the Vdbe). +1.146 (drh 24-Sep-04): */ +1.146 (drh 24-Sep-04): static void resizeOpArray(Vdbe *p, int N){ +1.199 (drh 16-Sep-05): int runMode = p->magic==VDBE_MAGIC_RUN; +1.199 (drh 16-Sep-05): if( runMode || p->nOpAllocnOpAlloc; +1.315 (danielk1 29-Aug-07): pNew = sqlite3DbRealloc(p->db, p->aOp, nNew*sizeof(Op)); +1.198 (drh 16-Sep-05): if( pNew ){ +1.199 (drh 16-Sep-05): p->nOpAlloc = nNew; +1.198 (drh 16-Sep-05): p->aOp = pNew; +1.199 (drh 16-Sep-05): if( nNew>oldSize ){ +1.199 (drh 16-Sep-05): memset(&p->aOp[oldSize], 0, (nNew-oldSize)*sizeof(Op)); +1.199 (drh 16-Sep-05): } +1.146 (drh 24-Sep-04): } +1.146 (drh 24-Sep-04): } +1.146 (drh 24-Sep-04): } +1.146 (drh 24-Sep-04): +1.146 (drh 24-Sep-04): /* +1.1 (drh 06-Sep-03): ** Add a new instruction to the list of instructions current in the +1.1 (drh 06-Sep-03): ** VDBE. 
Return the address of the new instruction. +1.1 (drh 06-Sep-03): ** +1.1 (drh 06-Sep-03): ** Parameters: +1.1 (drh 06-Sep-03): ** +1.1 (drh 06-Sep-03): ** p Pointer to the VDBE +1.1 (drh 06-Sep-03): ** +1.1 (drh 06-Sep-03): ** op The opcode for this instruction +1.1 (drh 06-Sep-03): ** +1.1 (drh 06-Sep-03): ** p1, p2 First two of the three possible operands. +1.1 (drh 06-Sep-03): ** +1.19 (danielk1 08-May-04): ** Use the sqlite3VdbeResolveLabel() function to fix an address and +1.19 (danielk1 08-May-04): ** the sqlite3VdbeChangeP3() function to change the value of the P3 +1.1 (drh 06-Sep-03): ** operand. +1.1 (drh 06-Sep-03): */ +1.19 (danielk1 08-May-04): int sqlite3VdbeAddOp(Vdbe *p, int op, int p1, int p2){ +1.1 (drh 06-Sep-03): int i; +1.18 (drh 22-Feb-04): VdbeOp *pOp; +1.1 (drh 06-Sep-03): +1.1 (drh 06-Sep-03): i = p->nOp; +1.1 (drh 06-Sep-03): assert( p->magic==VDBE_MAGIC_INIT ); +1.241 (drh 15-Mar-06): if( p->nOpAlloc<=i ){ +1.241 (drh 15-Mar-06): resizeOpArray(p, i+1); +1.299 (drh 16-Aug-07): if( p->db->mallocFailed ){ +1.241 (drh 15-Mar-06): return 0; +1.241 (drh 15-Mar-06): } +1.1 (drh 06-Sep-03): } +1.282 (danielk1 18-Apr-07): p->nOp++; +1.18 (drh 22-Feb-04): pOp = &p->aOp[i]; +1.18 (drh 22-Feb-04): pOp->opcode = op; +1.18 (drh 22-Feb-04): pOp->p1 = p1; +1.18 (drh 22-Feb-04): pOp->p2 = p2; +1.18 (drh 22-Feb-04): pOp->p3 = 0; +1.18 (drh 22-Feb-04): pOp->p3type = P3_NOTUSED; +1.185 (drh 14-Aug-05): p->expired = 0; +1.156 (danielk1 12-Jan-05): #ifdef SQLITE_DEBUG +1.24 (danielk1 10-May-04): if( sqlite3_vdbe_addop_trace ) sqlite3VdbePrintOp(0, i, &p->aOp[i]); +1.1 (drh 06-Sep-03): #endif +1.1 (drh 06-Sep-03): return i; +1.1 (drh 06-Sep-03): } +1.18 (drh 22-Feb-04): +1.18 (drh 22-Feb-04): /* +1.18 (drh 22-Feb-04): ** Add an opcode that includes the p3 value. +1.18 (drh 22-Feb-04): */ +1.69 (drh 27-May-04): int sqlite3VdbeOp3(Vdbe *p, int op, int p1, int p2, const char *zP3,int p3type){ +1.19 (danielk1 08-May-04): int addr = sqlite3VdbeAddOp(p, op, p1, p2); +1.19 (danielk1 08-May-04): sqlite3VdbeChangeP3(p, addr, zP3, p3type); +1.18 (drh 22-Feb-04): return addr; +1.18 (drh 22-Feb-04): } +1.18 (drh 22-Feb-04): +1.18 (drh 22-Feb-04): /* +1.1 (drh 06-Sep-03): ** Create a new symbolic label for an instruction that has yet to be +1.1 (drh 06-Sep-03): ** coded. The symbolic label is really just a negative number. The +1.1 (drh 06-Sep-03): ** label can be used as the P2 value of an operation. Later, when +1.1 (drh 06-Sep-03): ** the label is resolved to a specific address, the VDBE will scan +1.1 (drh 06-Sep-03): ** through its operation list and change all values of P2 which match +1.1 (drh 06-Sep-03): ** the label into the resolved address. +1.1 (drh 06-Sep-03): ** +1.1 (drh 06-Sep-03): ** The VDBE knows that a P2 value is a label because labels are +1.1 (drh 06-Sep-03): ** always negative and P2 values are suppose to be non-negative. +1.1 (drh 06-Sep-03): ** Hence, a negative P2 value is a label that has yet to be resolved. +1.129 (danielk1 26-Jun-04): ** +1.129 (danielk1 26-Jun-04): ** Zero is returned if a malloc() fails. 
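The label mechanism described above amounts to handing out negative placeholder addresses and back-patching them once the real targets are known. The toy program below is not SQLite code (every name in it is invented), but it shows the same bookkeeping in miniature.

#include <stdio.h>

/* Toy opcode: only the fields needed to illustrate label patching. */
typedef struct { int opcode; int p2; } ToyOp;

int main(void){
  ToyOp aOp[4];
  int aLabel[4];
  int nLabel = 0, nOp = 0, i, lbl;

  lbl = -1 - nLabel++;                 /* hand out label -1, -2, ... */
  aOp[nOp].opcode = 1;                 /* e.g. a conditional jump */
  aOp[nOp].p2 = lbl; nOp++;
  aOp[nOp].opcode = 2; aOp[nOp].p2 = 0; nOp++;
  aLabel[-1-lbl] = nOp;                /* resolve: label -> next address */
  aOp[nOp].opcode = 3; aOp[nOp].p2 = 0; nOp++;

  for(i=0; i<nOp; i++){                /* back-patch negative P2 values */
    if( aOp[i].p2<0 ) aOp[i].p2 = aLabel[-1-aOp[i].p2];
  }
  for(i=0; i<nOp; i++){
    printf("%d: op=%d p2=%d\n", i, aOp[i].opcode, aOp[i].p2);
  }
  return 0;
}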
+1.1 (drh 06-Sep-03): */ +1.19 (danielk1 08-May-04): int sqlite3VdbeMakeLabel(Vdbe *p){ +1.1 (drh 06-Sep-03): int i; +1.1 (drh 06-Sep-03): i = p->nLabel++; +1.1 (drh 06-Sep-03): assert( p->magic==VDBE_MAGIC_INIT ); +1.1 (drh 06-Sep-03): if( i>=p->nLabelAlloc ){ +1.1 (drh 06-Sep-03): p->nLabelAlloc = p->nLabelAlloc*2 + 10; +1.300 (danielk1 16-Aug-07): p->aLabel = sqlite3DbReallocOrFree(p->db, p->aLabel, +1.273 (drh 27-Mar-07): p->nLabelAlloc*sizeof(p->aLabel[0])); +1.146 (drh 24-Sep-04): } +1.146 (drh 24-Sep-04): if( p->aLabel ){ +1.146 (drh 24-Sep-04): p->aLabel[i] = -1; +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): return -1-i; +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): +1.1 (drh 06-Sep-03): /* +1.1 (drh 06-Sep-03): ** Resolve label "x" to be the address of the next instruction to +1.1 (drh 06-Sep-03): ** be inserted. The parameter "x" must have been obtained from +1.19 (danielk1 08-May-04): ** a prior call to sqlite3VdbeMakeLabel(). +1.1 (drh 06-Sep-03): */ +1.19 (danielk1 08-May-04): void sqlite3VdbeResolveLabel(Vdbe *p, int x){ +1.146 (drh 24-Sep-04): int j = -1-x; +1.1 (drh 06-Sep-03): assert( p->magic==VDBE_MAGIC_INIT ); +1.146 (drh 24-Sep-04): assert( j>=0 && jnLabel ); +1.146 (drh 24-Sep-04): if( p->aLabel ){ +1.146 (drh 24-Sep-04): p->aLabel[j] = p->nOp; +1.146 (drh 24-Sep-04): } +1.146 (drh 24-Sep-04): } +1.146 (drh 24-Sep-04): +1.146 (drh 24-Sep-04): /* +1.171 (danielk1 29-Mar-05): ** Return non-zero if opcode 'op' is guarenteed not to push more values +1.171 (danielk1 29-Mar-05): ** onto the VDBE stack than it pops off. +1.171 (danielk1 29-Mar-05): */ +1.172 (danielk1 29-Mar-05): static int opcodeNoPush(u8 op){ +1.172 (danielk1 29-Mar-05): /* The 10 NOPUSH_MASK_n constants are defined in the automatically +1.171 (danielk1 29-Mar-05): ** generated header file opcodes.h. Each is a 16-bit bitmask, one +1.171 (danielk1 29-Mar-05): ** bit corresponding to each opcode implemented by the virtual +1.172 (danielk1 29-Mar-05): ** machine in vdbe.c. The bit is true if the word "no-push" appears +1.171 (danielk1 29-Mar-05): ** in a comment on the same line as the "case OP_XXX:" in +1.171 (danielk1 29-Mar-05): ** sqlite3VdbeExec() in vdbe.c. +1.171 (danielk1 29-Mar-05): ** +1.171 (danielk1 29-Mar-05): ** If the bit is true, then the corresponding opcode is guarenteed not +1.171 (danielk1 29-Mar-05): ** to grow the stack when it is executed. Otherwise, it may grow the +1.171 (danielk1 29-Mar-05): ** stack by at most one entry. +1.171 (danielk1 29-Mar-05): ** +1.172 (danielk1 29-Mar-05): ** NOPUSH_MASK_0 corresponds to opcodes 0 to 15. NOPUSH_MASK_1 contains +1.171 (danielk1 29-Mar-05): ** one bit for opcodes 16 to 31, and so on. +1.171 (danielk1 29-Mar-05): ** +1.171 (danielk1 29-Mar-05): ** 16-bit bitmasks (rather than 32-bit) are specified in opcodes.h +1.171 (danielk1 29-Mar-05): ** because the file is generated by an awk program. Awk manipulates +1.171 (danielk1 29-Mar-05): ** all numbers as floating-point and we don't want to risk a rounding +1.171 (danielk1 29-Mar-05): ** error if someone builds with an awk that uses (for example) 32-bit +1.171 (danielk1 29-Mar-05): ** IEEE floats. 
+1.171 (danielk1 29-Mar-05): */ +1.173 (drh 31-Mar-05): static const u32 masks[5] = { +1.238 (drh 24-Feb-06): NOPUSH_MASK_0 + (((unsigned)NOPUSH_MASK_1)<<16), +1.238 (drh 24-Feb-06): NOPUSH_MASK_2 + (((unsigned)NOPUSH_MASK_3)<<16), +1.238 (drh 24-Feb-06): NOPUSH_MASK_4 + (((unsigned)NOPUSH_MASK_5)<<16), +1.238 (drh 24-Feb-06): NOPUSH_MASK_6 + (((unsigned)NOPUSH_MASK_7)<<16), +1.238 (drh 24-Feb-06): NOPUSH_MASK_8 + (((unsigned)NOPUSH_MASK_9)<<16) +1.171 (danielk1 29-Mar-05): }; +1.206 (drh 29-Nov-05): assert( op<32*5 ); +1.171 (danielk1 29-Mar-05): return (masks[op>>5] & (1<<(op&0x1F))); +1.171 (danielk1 29-Mar-05): } +1.171 (danielk1 29-Mar-05): +1.171 (danielk1 29-Mar-05): #ifndef NDEBUG +1.172 (danielk1 29-Mar-05): int sqlite3VdbeOpcodeNoPush(u8 op){ +1.172 (danielk1 29-Mar-05): return opcodeNoPush(op); +1.171 (danielk1 29-Mar-05): } +1.171 (danielk1 29-Mar-05): #endif +1.171 (danielk1 29-Mar-05): +1.171 (danielk1 29-Mar-05): /* +1.146 (drh 24-Sep-04): ** Loop through the program looking for P2 values that are negative. +1.146 (drh 24-Sep-04): ** Each such value is a label. Resolve the label by setting the P2 +1.146 (drh 24-Sep-04): ** value to its correct non-zero value. +1.146 (drh 24-Sep-04): ** +1.146 (drh 24-Sep-04): ** This routine is called once after all opcodes have been inserted. +1.170 (danielk1 28-Mar-05): ** +1.195 (drh 07-Sep-05): ** Variable *pMaxFuncArgs is set to the maximum value of any P2 argument +1.247 (danielk1 14-Jun-06): ** to an OP_Function, OP_AggStep or OP_VFilter opcode. This is used by +1.170 (danielk1 28-Mar-05): ** sqlite3VdbeMakeReady() to size the Vdbe.apArg[] array. +1.171 (danielk1 29-Mar-05): ** +1.171 (danielk1 29-Mar-05): ** The integer *pMaxStack is set to the maximum number of vdbe stack +1.171 (danielk1 29-Mar-05): ** entries that static analysis reveals this program might need. +1.180 (drh 07-Jun-05): ** +1.180 (drh 07-Jun-05): ** This routine also does the following optimization: It scans for +1.180 (drh 07-Jun-05): ** Halt instructions where P1==SQLITE_CONSTRAINT or P2==OE_Abort or for +1.181 (drh 12-Jun-05): ** IdxInsert instructions where P2!=0. If no such instruction is +1.180 (drh 07-Jun-05): ** found, then every Statement instruction is changed to a Noop. In +1.180 (drh 07-Jun-05): ** this way, we avoid creating the statement journal file unnecessarily. 
+1.146 (drh 24-Sep-04): */ +1.171 (danielk1 29-Mar-05): static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs, int *pMaxStack){ +1.146 (drh 24-Sep-04): int i; +1.171 (danielk1 29-Mar-05): int nMaxArgs = 0; +1.171 (danielk1 29-Mar-05): int nMaxStack = p->nOp; +1.146 (drh 24-Sep-04): Op *pOp; +1.146 (drh 24-Sep-04): int *aLabel = p->aLabel; +1.180 (drh 07-Jun-05): int doesStatementRollback = 0; +1.180 (drh 07-Jun-05): int hasStatementBegin = 0; +1.146 (drh 24-Sep-04): for(pOp=p->aOp, i=p->nOp-1; i>=0; i--, pOp++){ +1.170 (danielk1 28-Mar-05): u8 opcode = pOp->opcode; +1.170 (danielk1 28-Mar-05): +1.251 (danielk1 20-Jun-06): if( opcode==OP_Function || opcode==OP_AggStep +1.247 (danielk1 14-Jun-06): #ifndef SQLITE_OMIT_VIRTUALTABLE +1.251 (danielk1 20-Jun-06): || opcode==OP_VUpdate +1.247 (danielk1 14-Jun-06): #endif +1.247 (danielk1 14-Jun-06): ){ +1.171 (danielk1 29-Mar-05): if( pOp->p2>nMaxArgs ) nMaxArgs = pOp->p2; +1.294 (danielk1 27-Jun-07): } +1.294 (danielk1 27-Jun-07): if( opcode==OP_Halt ){ +1.180 (drh 07-Jun-05): if( pOp->p1==SQLITE_CONSTRAINT && pOp->p2==OE_Abort ){ +1.180 (drh 07-Jun-05): doesStatementRollback = 1; +1.180 (drh 07-Jun-05): } +1.180 (drh 07-Jun-05): }else if( opcode==OP_Statement ){ +1.180 (drh 07-Jun-05): hasStatementBegin = 1; +1.294 (danielk1 27-Jun-07): #ifndef SQLITE_OMIT_VIRTUALTABLE +1.294 (danielk1 27-Jun-07): }else if( opcode==OP_VUpdate || opcode==OP_VRename ){ +1.294 (danielk1 27-Jun-07): doesStatementRollback = 1; +1.246 (drh 13-Jun-06): }else if( opcode==OP_VFilter ){ +1.246 (drh 13-Jun-06): int n; +1.246 (drh 13-Jun-06): assert( p->nOp - i >= 3 ); +1.246 (drh 13-Jun-06): assert( pOp[-2].opcode==OP_Integer ); +1.246 (drh 13-Jun-06): n = pOp[-2].p1; +1.246 (drh 13-Jun-06): if( n>nMaxArgs ) nMaxArgs = n; +1.294 (danielk1 27-Jun-07): #endif +1.171 (danielk1 29-Mar-05): } +1.172 (danielk1 29-Mar-05): if( opcodeNoPush(opcode) ){ +1.171 (danielk1 29-Mar-05): nMaxStack--; +1.170 (danielk1 28-Mar-05): } +1.170 (danielk1 28-Mar-05): +1.146 (drh 24-Sep-04): if( pOp->p2>=0 ) continue; +1.146 (drh 24-Sep-04): assert( -1-pOp->p2nLabel ); +1.146 (drh 24-Sep-04): pOp->p2 = aLabel[-1-pOp->p2]; +1.1 (drh 06-Sep-03): } +1.299 (drh 16-Aug-07): sqlite3_free(p->aLabel); +1.146 (drh 24-Sep-04): p->aLabel = 0; +1.171 (danielk1 29-Mar-05): +1.171 (danielk1 29-Mar-05): *pMaxFuncArgs = nMaxArgs; +1.171 (danielk1 29-Mar-05): *pMaxStack = nMaxStack; +1.180 (drh 07-Jun-05): +1.180 (drh 07-Jun-05): /* If we never rollback a statement transaction, then statement +1.180 (drh 07-Jun-05): ** transactions are not needed. So change every OP_Statement +1.218 (drh 06-Jan-06): ** opcode into an OP_Noop. This avoid a call to sqlite3OsOpenExclusive() +1.180 (drh 07-Jun-05): ** which can be expensive on some platforms. +1.180 (drh 07-Jun-05): */ +1.180 (drh 07-Jun-05): if( hasStatementBegin && !doesStatementRollback ){ +1.180 (drh 07-Jun-05): for(pOp=p->aOp, i=p->nOp-1; i>=0; i--, pOp++){ +1.180 (drh 07-Jun-05): if( pOp->opcode==OP_Statement ){ +1.180 (drh 07-Jun-05): pOp->opcode = OP_Noop; +1.180 (drh 07-Jun-05): } +1.180 (drh 07-Jun-05): } +1.180 (drh 07-Jun-05): } +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): +1.1 (drh 06-Sep-03): /* +1.1 (drh 06-Sep-03): ** Return the address of the next instruction to be inserted. 
+1.1 (drh 06-Sep-03): */ +1.19 (danielk1 08-May-04): int sqlite3VdbeCurrentAddr(Vdbe *p){ +1.1 (drh 06-Sep-03): assert( p->magic==VDBE_MAGIC_INIT ); +1.1 (drh 06-Sep-03): return p->nOp; +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): +1.1 (drh 06-Sep-03): /* +1.1 (drh 06-Sep-03): ** Add a whole list of operations to the operation stack. Return the +1.1 (drh 06-Sep-03): ** address of the first operation added. +1.1 (drh 06-Sep-03): */ +1.19 (danielk1 08-May-04): int sqlite3VdbeAddOpList(Vdbe *p, int nOp, VdbeOpList const *aOp){ +1.1 (drh 06-Sep-03): int addr; +1.1 (drh 06-Sep-03): assert( p->magic==VDBE_MAGIC_INIT ); +1.146 (drh 24-Sep-04): resizeOpArray(p, p->nOp + nOp); +1.299 (drh 16-Aug-07): if( p->db->mallocFailed ){ +1.146 (drh 24-Sep-04): return 0; +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): addr = p->nOp; +1.1 (drh 06-Sep-03): if( nOp>0 ){ +1.1 (drh 06-Sep-03): int i; +1.16 (drh 21-Feb-04): VdbeOpList const *pIn = aOp; +1.16 (drh 21-Feb-04): for(i=0; ip2; +1.16 (drh 21-Feb-04): VdbeOp *pOut = &p->aOp[i+addr]; +1.16 (drh 21-Feb-04): pOut->opcode = pIn->opcode; +1.16 (drh 21-Feb-04): pOut->p1 = pIn->p1; +1.16 (drh 21-Feb-04): pOut->p2 = p2<0 ? addr + ADDR(p2) : p2; +1.16 (drh 21-Feb-04): pOut->p3 = pIn->p3; +1.16 (drh 21-Feb-04): pOut->p3type = pIn->p3 ? P3_STATIC : P3_NOTUSED; +1.156 (danielk1 12-Jan-05): #ifdef SQLITE_DEBUG +1.24 (danielk1 10-May-04): if( sqlite3_vdbe_addop_trace ){ +1.19 (danielk1 08-May-04): sqlite3VdbePrintOp(0, i+addr, &p->aOp[i+addr]); +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): #endif +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): p->nOp += nOp; +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): return addr; +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): +1.1 (drh 06-Sep-03): /* +1.1 (drh 06-Sep-03): ** Change the value of the P1 operand for a specific instruction. +1.1 (drh 06-Sep-03): ** This routine is useful when a large program is loaded from a +1.19 (danielk1 08-May-04): ** static array using sqlite3VdbeAddOpList but we want to make a +1.1 (drh 06-Sep-03): ** few minor changes to the program. +1.1 (drh 06-Sep-03): */ +1.19 (danielk1 08-May-04): void sqlite3VdbeChangeP1(Vdbe *p, int addr, int val){ +1.240 (drh 13-Mar-06): assert( p==0 || p->magic==VDBE_MAGIC_INIT ); +1.1 (drh 06-Sep-03): if( p && addr>=0 && p->nOp>addr && p->aOp ){ +1.1 (drh 06-Sep-03): p->aOp[addr].p1 = val; +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): +1.1 (drh 06-Sep-03): /* +1.1 (drh 06-Sep-03): ** Change the value of the P2 operand for a specific instruction. +1.1 (drh 06-Sep-03): ** This routine is useful for setting a jump destination. +1.1 (drh 06-Sep-03): */ +1.19 (danielk1 08-May-04): void sqlite3VdbeChangeP2(Vdbe *p, int addr, int val){ +1.1 (drh 06-Sep-03): assert( val>=0 ); +1.240 (drh 13-Mar-06): assert( p==0 || p->magic==VDBE_MAGIC_INIT ); +1.1 (drh 06-Sep-03): if( p && addr>=0 && p->nOp>addr && p->aOp ){ +1.1 (drh 06-Sep-03): p->aOp[addr].p2 = val; +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): +1.202 (drh 20-Sep-05): /* +1.242 (drh 17-Mar-06): ** Change the P2 operand of instruction addr so that it points to +1.202 (drh 20-Sep-05): ** the address of the next instruction to be coded. +1.202 (drh 20-Sep-05): */ +1.202 (drh 20-Sep-05): void sqlite3VdbeJumpHere(Vdbe *p, int addr){ +1.202 (drh 20-Sep-05): sqlite3VdbeChangeP2(p, addr, p->nOp); +1.202 (drh 20-Sep-05): } +1.198 (drh 16-Sep-05): +1.257 (drh 08-Jul-06): +1.257 (drh 08-Jul-06): /* +1.257 (drh 08-Jul-06): ** If the input FuncDef structure is ephemeral, then free it. 
If +1.257 (drh 08-Jul-06): ** the FuncDef is not ephermal, then do nothing. +1.257 (drh 08-Jul-06): */ +1.257 (drh 08-Jul-06): static void freeEphemeralFunction(FuncDef *pDef){ +1.257 (drh 08-Jul-06): if( pDef && (pDef->flags & SQLITE_FUNC_EPHEM)!=0 ){ +1.299 (drh 16-Aug-07): sqlite3_free(pDef); +1.257 (drh 08-Jul-06): } +1.257 (drh 08-Jul-06): } +1.257 (drh 08-Jul-06): +1.198 (drh 16-Sep-05): /* +1.198 (drh 16-Sep-05): ** Delete a P3 value if necessary. +1.198 (drh 16-Sep-05): */ +1.198 (drh 16-Sep-05): static void freeP3(int p3type, void *p3){ +1.198 (drh 16-Sep-05): if( p3 ){ +1.201 (drh 17-Sep-05): switch( p3type ){ +1.201 (drh 17-Sep-05): case P3_DYNAMIC: +1.201 (drh 17-Sep-05): case P3_KEYINFO: +1.201 (drh 17-Sep-05): case P3_KEYINFO_HANDOFF: { +1.299 (drh 16-Aug-07): sqlite3_free(p3); +1.201 (drh 17-Sep-05): break; +1.201 (drh 17-Sep-05): } +1.246 (drh 13-Jun-06): case P3_MPRINTF: { +1.246 (drh 13-Jun-06): sqlite3_free(p3); +1.246 (drh 13-Jun-06): break; +1.246 (drh 13-Jun-06): } +1.201 (drh 17-Sep-05): case P3_VDBEFUNC: { +1.201 (drh 17-Sep-05): VdbeFunc *pVdbeFunc = (VdbeFunc *)p3; +1.257 (drh 08-Jul-06): freeEphemeralFunction(pVdbeFunc->pFunc); +1.201 (drh 17-Sep-05): sqlite3VdbeDeleteAuxData(pVdbeFunc, 0); +1.299 (drh 16-Aug-07): sqlite3_free(pVdbeFunc); +1.201 (drh 17-Sep-05): break; +1.201 (drh 17-Sep-05): } +1.257 (drh 08-Jul-06): case P3_FUNCDEF: { +1.257 (drh 08-Jul-06): freeEphemeralFunction((FuncDef*)p3); +1.257 (drh 08-Jul-06): break; +1.257 (drh 08-Jul-06): } +1.201 (drh 17-Sep-05): case P3_MEM: { +1.201 (drh 17-Sep-05): sqlite3ValueFree((sqlite3_value*)p3); +1.201 (drh 17-Sep-05): break; +1.201 (drh 17-Sep-05): } +1.198 (drh 16-Sep-05): } +1.198 (drh 16-Sep-05): } +1.198 (drh 16-Sep-05): } +1.198 (drh 16-Sep-05): +1.198 (drh 16-Sep-05): +1.1 (drh 06-Sep-03): /* +1.242 (drh 17-Mar-06): ** Change N opcodes starting at addr to No-ops. +1.242 (drh 17-Mar-06): */ +1.242 (drh 17-Mar-06): void sqlite3VdbeChangeToNoop(Vdbe *p, int addr, int N){ +1.285 (danielk1 04-May-07): if( p && p->aOp ){ +1.285 (danielk1 04-May-07): VdbeOp *pOp = &p->aOp[addr]; +1.285 (danielk1 04-May-07): while( N-- ){ +1.285 (danielk1 04-May-07): freeP3(pOp->p3type, pOp->p3); +1.285 (danielk1 04-May-07): memset(pOp, 0, sizeof(pOp[0])); +1.285 (danielk1 04-May-07): pOp->opcode = OP_Noop; +1.285 (danielk1 04-May-07): pOp++; +1.285 (danielk1 04-May-07): } +1.242 (drh 17-Mar-06): } +1.242 (drh 17-Mar-06): } +1.242 (drh 17-Mar-06): +1.242 (drh 17-Mar-06): /* +1.1 (drh 06-Sep-03): ** Change the value of the P3 operand for a specific instruction. +1.1 (drh 06-Sep-03): ** This routine is useful when a large program is loaded from a +1.19 (danielk1 08-May-04): ** static array using sqlite3VdbeAddOpList but we want to make a +1.1 (drh 06-Sep-03): ** few minor changes to the program. +1.1 (drh 06-Sep-03): ** +1.1 (drh 06-Sep-03): ** If n>=0 then the P3 operand is dynamic, meaning that a copy of +1.299 (drh 16-Aug-07): ** the string is made into memory obtained from sqlite3_malloc(). +1.1 (drh 06-Sep-03): ** A value of n==0 means copy bytes of zP3 up to and including the +1.1 (drh 06-Sep-03): ** first null byte. If n>0 then copy n+1 bytes of zP3. +1.1 (drh 06-Sep-03): ** +1.176 (danielk1 19-May-05): ** If n==P3_KEYINFO it means that zP3 is a pointer to a KeyInfo structure. +1.176 (danielk1 19-May-05): ** A copy is made of the KeyInfo structure into memory obtained from +1.299 (drh 16-Aug-07): ** sqlite3_malloc, to be freed when the Vdbe is finalized. 
+1.176 (danielk1 19-May-05): ** n==P3_KEYINFO_HANDOFF indicates that zP3 points to a KeyInfo structure +1.299 (drh 16-Aug-07): ** stored in memory that the caller has obtained from sqlite3_malloc. The +1.176 (danielk1 19-May-05): ** caller should not free the allocation, it will be freed when the Vdbe is +1.176 (danielk1 19-May-05): ** finalized. +1.176 (danielk1 19-May-05): ** +1.176 (danielk1 19-May-05): ** Other values of n (P3_STATIC, P3_COLLSEQ etc.) indicate that zP3 points +1.176 (danielk1 19-May-05): ** to a string or structure that is guaranteed to exist for the lifetime of +1.176 (danielk1 19-May-05): ** the Vdbe. In these cases we can just copy the pointer. +1.1 (drh 06-Sep-03): ** +1.1 (drh 06-Sep-03): ** If addr<0 then change P3 on the most recently inserted instruction. +1.1 (drh 06-Sep-03): */ +1.19 (danielk1 08-May-04): void sqlite3VdbeChangeP3(Vdbe *p, int addr, const char *zP3, int n){ +1.1 (drh 06-Sep-03): Op *pOp; +1.240 (drh 13-Mar-06): assert( p==0 || p->magic==VDBE_MAGIC_INIT ); +1.299 (drh 16-Aug-07): if( p==0 || p->aOp==0 || p->db->mallocFailed ){ +1.208 (danielk1 06-Dec-05): if (n != P3_KEYINFO) { +1.208 (danielk1 06-Dec-05): freeP3(n, (void*)*(char**)&zP3); +1.208 (danielk1 06-Dec-05): } +1.168 (danielk1 16-Mar-05): return; +1.168 (danielk1 16-Mar-05): } +1.1 (drh 06-Sep-03): if( addr<0 || addr>=p->nOp ){ +1.1 (drh 06-Sep-03): addr = p->nOp - 1; +1.1 (drh 06-Sep-03): if( addr<0 ) return; +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): pOp = &p->aOp[addr]; +1.198 (drh 16-Sep-05): freeP3(pOp->p3type, pOp->p3); +1.198 (drh 16-Sep-05): pOp->p3 = 0; +1.1 (drh 06-Sep-03): if( zP3==0 ){ +1.1 (drh 06-Sep-03): pOp->p3 = 0; +1.1 (drh 06-Sep-03): pOp->p3type = P3_NOTUSED; +1.50 (drh 20-May-04): }else if( n==P3_KEYINFO ){ +1.50 (drh 20-May-04): KeyInfo *pKeyInfo; +1.50 (drh 20-May-04): int nField, nByte; +1.192 (drh 01-Sep-05): +1.50 (drh 20-May-04): nField = ((KeyInfo*)zP3)->nField; +1.211 (drh 16-Dec-05): nByte = sizeof(*pKeyInfo) + (nField-1)*sizeof(pKeyInfo->aColl[0]) + nField; +1.299 (drh 16-Aug-07): pKeyInfo = sqlite3_malloc( nByte ); +1.50 (drh 20-May-04): pOp->p3 = (char*)pKeyInfo; +1.50 (drh 20-May-04): if( pKeyInfo ){ +1.226 (danielk1 16-Jan-06): unsigned char *aSortOrder; +1.50 (drh 20-May-04): memcpy(pKeyInfo, zP3, nByte); +1.211 (drh 16-Dec-05): aSortOrder = pKeyInfo->aSortOrder; +1.211 (drh 16-Dec-05): if( aSortOrder ){ +1.226 (danielk1 16-Jan-06): pKeyInfo->aSortOrder = (unsigned char*)&pKeyInfo->aColl[nField]; +1.211 (drh 16-Dec-05): memcpy(pKeyInfo->aSortOrder, aSortOrder, nField); +1.211 (drh 16-Dec-05): } +1.50 (drh 20-May-04): pOp->p3type = P3_KEYINFO; +1.50 (drh 20-May-04): }else{ +1.299 (drh 16-Aug-07): p->db->mallocFailed = 1; +1.50 (drh 20-May-04): pOp->p3type = P3_NOTUSED; +1.50 (drh 20-May-04): } +1.51 (drh 21-May-04): }else if( n==P3_KEYINFO_HANDOFF ){ +1.51 (drh 21-May-04): pOp->p3 = (char*)zP3; +1.51 (drh 21-May-04): pOp->p3type = P3_KEYINFO; +1.1 (drh 06-Sep-03): }else if( n<0 ){ +1.1 (drh 06-Sep-03): pOp->p3 = (char*)zP3; +1.1 (drh 06-Sep-03): pOp->p3type = n; +1.1 (drh 06-Sep-03): }else{ +1.147 (drh 25-Sep-04): if( n==0 ) n = strlen(zP3); +1.299 (drh 16-Aug-07): pOp->p3 = sqlite3DbStrNDup(p->db, zP3, n); +1.1 (drh 06-Sep-03): pOp->p3type = P3_DYNAMIC; +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): +1.144 (drh 19-Sep-04): #ifndef NDEBUG +1.144 (drh 19-Sep-04): /* +1.144 (drh 19-Sep-04): ** Replace the P3 field of the most recently coded instruction with +1.144 (drh 19-Sep-04): ** comment text. 
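Editorial note: the P3_KEYINFO branch above duplicates a KeyInfo whose collation pointers and per-column sort-order bytes live in the same allocation as the structure itself. A simplified sketch of that single-allocation copy pattern, assuming (as the code above does) that the source object was laid out the same way and that nField>=1; the types here are stand-ins, not the real KeyInfo:

    #include <stdlib.h>
    #include <string.h>

    typedef struct Coll Coll;                  /* stand-in for CollSeq */
    typedef struct Key {                       /* simplified stand-in for KeyInfo */
      int nField;
      unsigned char *aSortOrder;               /* points into the same allocation */
      Coll *aColl[1];                          /* nField entries follow the struct */
    } Key;

    /* Deep-copy pKey into one new allocation, as the P3_KEYINFO case does. */
    static Key *copyKey(const Key *pKey){
      int nField = pKey->nField;
      size_t nByte = sizeof(Key) + (nField-1)*sizeof(Coll*) + nField;
      Key *pNew = malloc(nByte);
      if( pNew ){
        memcpy(pNew, pKey, nByte);
        if( pKey->aSortOrder ){
          /* re-point the sort-order bytes at the copy's own trailing space */
          pNew->aSortOrder = (unsigned char*)&pNew->aColl[nField];
          memcpy(pNew->aSortOrder, pKey->aSortOrder, nField);
        }
      }
      return pNew;
    }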
+1.144 (drh 19-Sep-04): */ +1.144 (drh 19-Sep-04): void sqlite3VdbeComment(Vdbe *p, const char *zFormat, ...){ +1.144 (drh 19-Sep-04): va_list ap; +1.282 (danielk1 18-Apr-07): assert( p->nOp>0 || p->aOp==0 ); +1.299 (drh 16-Aug-07): assert( p->aOp==0 || p->aOp[p->nOp-1].p3==0 || p->db->mallocFailed ); +1.144 (drh 19-Sep-04): va_start(ap, zFormat); +1.300 (danielk1 16-Aug-07): sqlite3VdbeChangeP3(p, -1, sqlite3VMPrintf(p->db, zFormat, ap), P3_DYNAMIC); +1.144 (drh 19-Sep-04): va_end(ap); +1.144 (drh 19-Sep-04): } +1.144 (drh 19-Sep-04): #endif +1.144 (drh 19-Sep-04): +1.1 (drh 06-Sep-03): /* +1.1 (drh 06-Sep-03): ** Return the opcode for a given address. +1.1 (drh 06-Sep-03): */ +1.19 (danielk1 08-May-04): VdbeOp *sqlite3VdbeGetOp(Vdbe *p, int addr){ +1.1 (drh 06-Sep-03): assert( p->magic==VDBE_MAGIC_INIT ); +1.299 (drh 16-Aug-07): assert( (addr>=0 && addrnOp) || p->db->mallocFailed ); +1.282 (danielk1 18-Apr-07): return ((addr>=0 && addrnOp)?(&p->aOp[addr]):0); +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): +1.151 (drh 31-Oct-04): #if !defined(SQLITE_OMIT_EXPLAIN) || !defined(NDEBUG) \ +1.151 (drh 31-Oct-04): || defined(VDBE_PROFILE) || defined(SQLITE_DEBUG) +1.1 (drh 06-Sep-03): /* +1.50 (drh 20-May-04): ** Compute a string that describes the P3 parameter for an opcode. +1.50 (drh 20-May-04): ** Use zTemp for any required temporary buffer space. +1.50 (drh 20-May-04): */ +1.50 (drh 20-May-04): static char *displayP3(Op *pOp, char *zTemp, int nTemp){ +1.50 (drh 20-May-04): char *zP3; +1.50 (drh 20-May-04): assert( nTemp>=20 ); +1.50 (drh 20-May-04): switch( pOp->p3type ){ +1.50 (drh 20-May-04): case P3_KEYINFO: { +1.50 (drh 20-May-04): int i, j; +1.50 (drh 20-May-04): KeyInfo *pKeyInfo = (KeyInfo*)pOp->p3; +1.286 (drh 04-May-07): sqlite3_snprintf(nTemp, zTemp, "keyinfo(%d", pKeyInfo->nField); +1.50 (drh 20-May-04): i = strlen(zTemp); +1.50 (drh 20-May-04): for(j=0; jnField; j++){ +1.50 (drh 20-May-04): CollSeq *pColl = pKeyInfo->aColl[j]; +1.50 (drh 20-May-04): if( pColl ){ +1.50 (drh 20-May-04): int n = strlen(pColl->zName); +1.50 (drh 20-May-04): if( i+n>nTemp-6 ){ +1.286 (drh 04-May-07): memcpy(&zTemp[i],",...",4); +1.50 (drh 20-May-04): break; +1.50 (drh 20-May-04): } +1.50 (drh 20-May-04): zTemp[i++] = ','; +1.51 (drh 21-May-04): if( pKeyInfo->aSortOrder && pKeyInfo->aSortOrder[j] ){ +1.50 (drh 20-May-04): zTemp[i++] = '-'; +1.50 (drh 20-May-04): } +1.286 (drh 04-May-07): memcpy(&zTemp[i], pColl->zName,n+1); +1.50 (drh 20-May-04): i += n; +1.50 (drh 20-May-04): }else if( i+4p3; +1.286 (drh 04-May-07): sqlite3_snprintf(nTemp, zTemp, "collseq(%.20s)", pColl->zName); +1.50 (drh 20-May-04): zP3 = zTemp; +1.50 (drh 20-May-04): break; +1.50 (drh 20-May-04): } +1.67 (drh 26-May-04): case P3_FUNCDEF: { +1.67 (drh 26-May-04): FuncDef *pDef = (FuncDef*)pOp->p3; +1.244 (drh 13-Jun-06): sqlite3_snprintf(nTemp, zTemp, "%s(%d)", pDef->zName, pDef->nArg); +1.244 (drh 13-Jun-06): zP3 = zTemp; +1.244 (drh 13-Jun-06): break; +1.244 (drh 13-Jun-06): } +1.244 (drh 13-Jun-06): #ifndef SQLITE_OMIT_VIRTUALTABLE +1.244 (drh 13-Jun-06): case P3_VTAB: { +1.244 (drh 13-Jun-06): sqlite3_vtab *pVtab = (sqlite3_vtab*)pOp->p3; +1.256 (drh 26-Jun-06): sqlite3_snprintf(nTemp, zTemp, "vtab:%p:%p", pVtab, pVtab->pModule); +1.67 (drh 26-May-04): zP3 = zTemp; +1.67 (drh 26-May-04): break; +1.67 (drh 26-May-04): } +1.244 (drh 13-Jun-06): #endif +1.50 (drh 20-May-04): default: { +1.50 (drh 20-May-04): zP3 = pOp->p3; +1.135 (drh 24-Jul-04): if( zP3==0 || pOp->opcode==OP_Noop ){ +1.50 (drh 20-May-04): zP3 = ""; +1.50 (drh 
20-May-04): } +1.50 (drh 20-May-04): } +1.50 (drh 20-May-04): } +1.249 (drh 15-Jun-06): assert( zP3!=0 ); +1.50 (drh 20-May-04): return zP3; +1.50 (drh 20-May-04): } +1.151 (drh 31-Oct-04): #endif +1.50 (drh 20-May-04): +1.312 (drh 28-Aug-07): /* +1.313 (drh 28-Aug-07): ** Declare to the Vdbe that the BTree object at db->aDb[i] is used. +1.313 (drh 28-Aug-07): ** +1.312 (drh 28-Aug-07): */ +1.317 (drh 30-Aug-07): void sqlite3VdbeUsesBtree(Vdbe *p, int i){ +1.317 (drh 30-Aug-07): int mask; +1.313 (drh 28-Aug-07): assert( i>=0 && idb->nDb ); +1.313 (drh 28-Aug-07): assert( ibtreeMask)*8 ); +1.317 (drh 30-Aug-07): mask = 1<btreeMask & mask)==0 ){ +1.317 (drh 30-Aug-07): p->btreeMask |= mask; +1.317 (drh 30-Aug-07): sqlite3BtreeMutexArrayInsert(&p->aMutex, p->db->aDb[i].pBt); +1.317 (drh 30-Aug-07): } +1.312 (drh 28-Aug-07): } +1.312 (drh 28-Aug-07): +1.50 (drh 20-May-04): +1.156 (danielk1 12-Jan-05): #if defined(VDBE_PROFILE) || defined(SQLITE_DEBUG) +1.1 (drh 06-Sep-03): /* +1.1 (drh 06-Sep-03): ** Print a single opcode. This routine is used for debugging only. +1.1 (drh 06-Sep-03): */ +1.19 (danielk1 08-May-04): void sqlite3VdbePrintOp(FILE *pOut, int pc, Op *pOp){ +1.1 (drh 06-Sep-03): char *zP3; +1.50 (drh 20-May-04): char zPtr[50]; +1.50 (drh 20-May-04): static const char *zFormat1 = "%4d %-13s %4d %4d %s\n"; +1.1 (drh 06-Sep-03): if( pOut==0 ) pOut = stdout; +1.50 (drh 20-May-04): zP3 = displayP3(pOp, zPtr, sizeof(zPtr)); +1.50 (drh 20-May-04): fprintf(pOut, zFormat1, +1.311 (drh 27-Aug-07): pc, sqlite3OpcodeName(pOp->opcode), pOp->p1, pOp->p2, zP3); +1.1 (drh 06-Sep-03): fflush(pOut); +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): #endif +1.1 (drh 06-Sep-03): +1.1 (drh 06-Sep-03): /* +1.146 (drh 24-Sep-04): ** Release an array of N Mem elements +1.146 (drh 24-Sep-04): */ +1.146 (drh 24-Sep-04): static void releaseMemArray(Mem *p, int N){ +1.146 (drh 24-Sep-04): if( p ){ +1.146 (drh 24-Sep-04): while( N-->0 ){ +1.307 (drh 21-Aug-07): assert( N<2 || p[0].db==p[1].db ); +1.146 (drh 24-Sep-04): sqlite3VdbeMemRelease(p++); +1.146 (drh 24-Sep-04): } +1.146 (drh 24-Sep-04): } +1.146 (drh 24-Sep-04): } +1.146 (drh 24-Sep-04): +1.151 (drh 31-Oct-04): #ifndef SQLITE_OMIT_EXPLAIN +1.146 (drh 24-Sep-04): /* +1.1 (drh 06-Sep-03): ** Give a listing of the program in the virtual machine. +1.1 (drh 06-Sep-03): ** +1.19 (danielk1 08-May-04): ** The interface is the same as sqlite3VdbeExec(). But instead of +1.1 (drh 06-Sep-03): ** running the code, it invokes the callback once for each instruction. +1.1 (drh 06-Sep-03): ** This feature is used to implement "EXPLAIN". +1.1 (drh 06-Sep-03): */ +1.19 (danielk1 08-May-04): int sqlite3VdbeList( +1.1 (drh 06-Sep-03): Vdbe *p /* The VDBE */ +1.1 (drh 06-Sep-03): ){ +1.140 (drh 06-Sep-04): sqlite3 *db = p->db; +1.1 (drh 06-Sep-03): int i; +1.14 (drh 14-Feb-04): int rc = SQLITE_OK; +1.1 (drh 06-Sep-03): +1.1 (drh 06-Sep-03): assert( p->explain ); +1.155 (drh 11-Jan-05): if( p->magic!=VDBE_MAGIC_RUN ) return SQLITE_MISUSE; +1.155 (drh 11-Jan-05): assert( db->magic==SQLITE_MAGIC_BUSY ); +1.155 (drh 11-Jan-05): assert( p->rc==SQLITE_OK || p->rc==SQLITE_BUSY ); +1.56 (danielk1 22-May-04): +1.56 (danielk1 22-May-04): /* Even though this opcode does not put dynamic strings onto the +1.56 (danielk1 22-May-04): ** the stack, they may become dynamic if the user calls +1.68 (drh 26-May-04): ** sqlite3_column_text16(), causing a translation to UTF-16 encoding. 
+1.56 (danielk1 22-May-04): */ +1.56 (danielk1 22-May-04): if( p->pTos==&p->aStack[4] ){ +1.146 (drh 24-Sep-04): releaseMemArray(p->aStack, 5); +1.56 (danielk1 22-May-04): } +1.56 (danielk1 22-May-04): p->resOnStack = 0; +1.56 (danielk1 22-May-04): +1.197 (drh 10-Sep-05): do{ +1.197 (drh 10-Sep-05): i = p->pc++; +1.197 (drh 10-Sep-05): }while( inOp && p->explain==2 && p->aOp[i].opcode!=OP_Explain ); +1.14 (drh 14-Feb-04): if( i>=p->nOp ){ +1.14 (drh 14-Feb-04): p->rc = SQLITE_OK; +1.14 (drh 14-Feb-04): rc = SQLITE_DONE; +1.259 (drh 26-Jul-06): }else if( db->u1.isInterrupted ){ +1.155 (drh 11-Jan-05): p->rc = SQLITE_INTERRUPT; +1.14 (drh 14-Feb-04): rc = SQLITE_ERROR; +1.89 (danielk1 31-May-04): sqlite3SetString(&p->zErrMsg, sqlite3ErrStr(p->rc), (char*)0); +1.14 (drh 14-Feb-04): }else{ +1.50 (drh 20-May-04): Op *pOp = &p->aOp[i]; +1.69 (drh 27-May-04): Mem *pMem = p->aStack; +1.69 (drh 27-May-04): pMem->flags = MEM_Int; +1.88 (drh 31-May-04): pMem->type = SQLITE_INTEGER; +1.276 (drh 30-Mar-07): pMem->u.i = i; /* Program counter */ +1.69 (drh 27-May-04): pMem++; +1.69 (drh 27-May-04): +1.69 (drh 27-May-04): pMem->flags = MEM_Static|MEM_Str|MEM_Term; +1.311 (drh 27-Aug-07): pMem->z = (char*)sqlite3OpcodeName(pOp->opcode); /* Opcode */ +1.249 (drh 15-Jun-06): assert( pMem->z!=0 ); +1.69 (drh 27-May-04): pMem->n = strlen(pMem->z); +1.88 (drh 31-May-04): pMem->type = SQLITE_TEXT; +1.104 (danielk1 12-Jun-04): pMem->enc = SQLITE_UTF8; +1.69 (drh 27-May-04): pMem++; +1.69 (drh 27-May-04): +1.69 (drh 27-May-04): pMem->flags = MEM_Int; +1.276 (drh 30-Mar-07): pMem->u.i = pOp->p1; /* P1 */ +1.88 (drh 31-May-04): pMem->type = SQLITE_INTEGER; +1.69 (drh 27-May-04): pMem++; +1.69 (drh 27-May-04): +1.69 (drh 27-May-04): pMem->flags = MEM_Int; +1.276 (drh 30-Mar-07): pMem->u.i = pOp->p2; /* P2 */ +1.88 (drh 31-May-04): pMem->type = SQLITE_INTEGER; +1.69 (drh 27-May-04): pMem++; +1.69 (drh 27-May-04): +1.239 (drh 03-Mar-06): pMem->flags = MEM_Ephem|MEM_Str|MEM_Term; /* P3 */ +1.69 (drh 27-May-04): pMem->z = displayP3(pOp, pMem->zShort, sizeof(pMem->zShort)); +1.249 (drh 15-Jun-06): assert( pMem->z!=0 ); +1.239 (drh 03-Mar-06): pMem->n = strlen(pMem->z); +1.88 (drh 31-May-04): pMem->type = SQLITE_TEXT; +1.104 (danielk1 12-Jun-04): pMem->enc = SQLITE_UTF8; +1.69 (drh 27-May-04): +1.197 (drh 10-Sep-05): p->nResColumn = 5 - 2*(p->explain-1); +1.69 (drh 27-May-04): p->pTos = pMem; +1.14 (drh 14-Feb-04): p->rc = SQLITE_OK; +1.56 (danielk1 22-May-04): p->resOnStack = 1; +1.14 (drh 14-Feb-04): rc = SQLITE_ROW; +1.1 (drh 06-Sep-03): } +1.14 (drh 14-Feb-04): return rc; +1.1 (drh 06-Sep-03): } +1.151 (drh 31-Oct-04): #endif /* SQLITE_OMIT_EXPLAIN */ +1.1 (drh 06-Sep-03): +1.281 (drh 05-Apr-07): #ifdef SQLITE_DEBUG +1.1 (drh 06-Sep-03): /* +1.135 (drh 24-Jul-04): ** Print the SQL that was used to generate a VDBE program. 
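Editorial note: sqlite3VdbeList() above is the engine behind EXPLAIN: instead of running the program, it returns one result row per instruction. A small example of the consumer side using only the public API (in this version a plain EXPLAIN statement reports five columns: addr, opcode, p1, p2 and p3):

    #include <stdio.h>
    #include <sqlite3.h>

    /* Print the VDBE program that zSql compiles to, via EXPLAIN. */
    static int explainQuery(sqlite3 *db, const char *zSql){
      sqlite3_stmt *pStmt = 0;
      char *zExplain = sqlite3_mprintf("EXPLAIN %s", zSql);
      int rc = zExplain ? sqlite3_prepare_v2(db, zExplain, -1, &pStmt, 0) : SQLITE_NOMEM;
      sqlite3_free(zExplain);
      if( rc!=SQLITE_OK ) return rc;
      while( sqlite3_step(pStmt)==SQLITE_ROW ){
        printf("%4d %-13s %4d %4d %s\n",
            sqlite3_column_int(pStmt, 0),                  /* addr   */
            (const char*)sqlite3_column_text(pStmt, 1),    /* opcode */
            sqlite3_column_int(pStmt, 2),                  /* p1     */
            sqlite3_column_int(pStmt, 3),                  /* p2     */
            (const char*)sqlite3_column_text(pStmt, 4));   /* p3     */
      }
      return sqlite3_finalize(pStmt);
    }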
+1.135 (drh 24-Jul-04): */ +1.135 (drh 24-Jul-04): void sqlite3VdbePrintSql(Vdbe *p){ +1.135 (drh 24-Jul-04): int nOp = p->nOp; +1.135 (drh 24-Jul-04): VdbeOp *pOp; +1.142 (drh 15-Sep-04): if( nOp<1 ) return; +1.142 (drh 15-Sep-04): pOp = &p->aOp[nOp-1]; +1.135 (drh 24-Jul-04): if( pOp->opcode==OP_Noop && pOp->p3!=0 ){ +1.135 (drh 24-Jul-04): const char *z = pOp->p3; +1.136 (drh 08-Aug-04): while( isspace(*(u8*)z) ) z++; +1.135 (drh 24-Jul-04): printf("SQL: [%s]\n", z); +1.135 (drh 24-Jul-04): } +1.281 (drh 05-Apr-07): } +1.135 (drh 24-Jul-04): #endif +1.135 (drh 24-Jul-04): +1.271 (drh 01-Mar-07): #if !defined(SQLITE_OMIT_TRACE) && defined(SQLITE_ENABLE_IOTRACE) +1.271 (drh 01-Mar-07): /* +1.271 (drh 01-Mar-07): ** Print an IOTRACE message showing SQL content. +1.271 (drh 01-Mar-07): */ +1.271 (drh 01-Mar-07): void sqlite3VdbeIOTraceSql(Vdbe *p){ +1.271 (drh 01-Mar-07): int nOp = p->nOp; +1.271 (drh 01-Mar-07): VdbeOp *pOp; +1.271 (drh 01-Mar-07): if( sqlite3_io_trace==0 ) return; +1.271 (drh 01-Mar-07): if( nOp<1 ) return; +1.271 (drh 01-Mar-07): pOp = &p->aOp[nOp-1]; +1.271 (drh 01-Mar-07): if( pOp->opcode==OP_Noop && pOp->p3!=0 ){ +1.271 (drh 01-Mar-07): int i, j; +1.297 (drh 13-Aug-07): char z[1000]; +1.297 (drh 13-Aug-07): sqlite3_snprintf(sizeof(z), z, "%s", pOp->p3); +1.288 (danielk1 16-May-07): for(i=0; isspace((unsigned char)z[i]); i++){} +1.271 (drh 01-Mar-07): for(j=0; z[i]; i++){ +1.288 (danielk1 16-May-07): if( isspace((unsigned char)z[i]) ){ +1.271 (drh 01-Mar-07): if( z[i-1]!=' ' ){ +1.271 (drh 01-Mar-07): z[j++] = ' '; +1.271 (drh 01-Mar-07): } +1.271 (drh 01-Mar-07): }else{ +1.271 (drh 01-Mar-07): z[j++] = z[i]; +1.271 (drh 01-Mar-07): } +1.271 (drh 01-Mar-07): } +1.271 (drh 01-Mar-07): z[j] = 0; +1.271 (drh 01-Mar-07): sqlite3_io_trace("SQL %s\n", z); +1.271 (drh 01-Mar-07): } +1.271 (drh 01-Mar-07): } +1.271 (drh 01-Mar-07): #endif /* !SQLITE_OMIT_TRACE && SQLITE_ENABLE_IOTRACE */ +1.271 (drh 01-Mar-07): +1.271 (drh 01-Mar-07): +1.135 (drh 24-Jul-04): /* +1.1 (drh 06-Sep-03): ** Prepare a virtual machine for execution. This involves things such +1.1 (drh 06-Sep-03): ** as allocating stack space and initializing the program counter. +1.1 (drh 06-Sep-03): ** After the VDBE has be prepped, it can be executed by one or more +1.19 (danielk1 08-May-04): ** calls to sqlite3VdbeExec(). +1.139 (drh 02-Sep-04): ** +1.139 (drh 02-Sep-04): ** This is the only way to move a VDBE from VDBE_MAGIC_INIT to +1.139 (drh 02-Sep-04): ** VDBE_MAGIC_RUN. +1.1 (drh 06-Sep-03): */ +1.19 (danielk1 08-May-04): void sqlite3VdbeMakeReady( +1.1 (drh 06-Sep-03): Vdbe *p, /* The VDBE */ +1.2 (drh 06-Sep-03): int nVar, /* Number of '?' see in the SQL statement */ +1.138 (drh 21-Aug-04): int nMem, /* Number of memory cells to allocate */ +1.138 (drh 21-Aug-04): int nCursor, /* Number of cursors to allocate */ +1.1 (drh 06-Sep-03): int isExplain /* True if the EXPLAIN keywords is present */ +1.1 (drh 06-Sep-03): ){ +1.1 (drh 06-Sep-03): int n; +1.300 (danielk1 16-Aug-07): sqlite3 *db = p->db; +1.1 (drh 06-Sep-03): +1.1 (drh 06-Sep-03): assert( p!=0 ); +1.1 (drh 06-Sep-03): assert( p->magic==VDBE_MAGIC_INIT ); +1.1 (drh 06-Sep-03): +1.142 (drh 15-Sep-04): /* There should be at least one opcode. +1.1 (drh 06-Sep-03): */ +1.142 (drh 15-Sep-04): assert( p->nOp>0 ); +1.1 (drh 06-Sep-03): +1.170 (danielk1 28-Mar-05): /* Set the magic to VDBE_MAGIC_RUN sooner rather than later. 
This +1.170 (danielk1 28-Mar-05): * is because the call to resizeOpArray() below may shrink the +1.170 (danielk1 28-Mar-05): * p->aOp[] array to save memory if called when in VDBE_MAGIC_RUN +1.170 (danielk1 28-Mar-05): * state. +1.170 (danielk1 28-Mar-05): */ +1.170 (danielk1 28-Mar-05): p->magic = VDBE_MAGIC_RUN; +1.170 (danielk1 28-Mar-05): +1.1 (drh 06-Sep-03): /* No instruction ever pushes more than a single element onto the +1.1 (drh 06-Sep-03): ** stack. And the stack never grows on successive executions of the +1.1 (drh 06-Sep-03): ** same loop. So the total number of instructions is an upper bound +1.180 (drh 07-Jun-05): ** on the maximum stack depth required. (Added later:) The +1.180 (drh 07-Jun-05): ** resolveP2Values() call computes a tighter upper bound on the +1.180 (drh 07-Jun-05): ** stack size. +1.1 (drh 06-Sep-03): ** +1.1 (drh 06-Sep-03): ** Allocation all the stack space we will ever need. +1.1 (drh 06-Sep-03): */ +1.3 (drh 06-Sep-03): if( p->aStack==0 ){ +1.170 (danielk1 28-Mar-05): int nArg; /* Maximum number of args passed to a user function. */ +1.171 (danielk1 29-Mar-05): int nStack; /* Maximum number of stack entries required */ +1.171 (danielk1 29-Mar-05): resolveP2Values(p, &nArg, &nStack); +1.170 (danielk1 28-Mar-05): resizeOpArray(p, p->nOp); +1.3 (drh 06-Sep-03): assert( nVar>=0 ); +1.171 (danielk1 29-Mar-05): assert( nStacknOp ); +1.261 (drh 08-Aug-06): if( isExplain ){ +1.261 (drh 08-Aug-06): nStack = 10; +1.261 (drh 08-Aug-06): } +1.300 (danielk1 16-Aug-07): p->aStack = sqlite3DbMallocZero(db, +1.171 (danielk1 29-Mar-05): nStack*sizeof(p->aStack[0]) /* aStack */ +1.170 (danielk1 28-Mar-05): + nArg*sizeof(Mem*) /* apArg */ +1.149 (drh 05-Oct-04): + nVar*sizeof(Mem) /* aVar */ +1.149 (drh 05-Oct-04): + nVar*sizeof(char*) /* azVar */ +1.149 (drh 05-Oct-04): + nMem*sizeof(Mem) /* aMem */ +1.149 (drh 05-Oct-04): + nCursor*sizeof(Cursor*) /* apCsr */ +1.3 (drh 06-Sep-03): ); +1.299 (drh 16-Aug-07): if( !db->mallocFailed ){ +1.171 (danielk1 29-Mar-05): p->aMem = &p->aStack[nStack]; +1.149 (drh 05-Oct-04): p->nMem = nMem; +1.149 (drh 05-Oct-04): p->aVar = &p->aMem[nMem]; +1.149 (drh 05-Oct-04): p->nVar = nVar; +1.138 (drh 21-Aug-04): p->okVar = 0; +1.149 (drh 05-Oct-04): p->apArg = (Mem**)&p->aVar[nVar]; +1.170 (danielk1 28-Mar-05): p->azVar = (char**)&p->apArg[nArg]; +1.149 (drh 05-Oct-04): p->apCsr = (Cursor**)&p->azVar[nVar]; +1.138 (drh 21-Aug-04): p->nCursor = nCursor; +1.138 (drh 21-Aug-04): for(n=0; naVar[n].flags = MEM_Null; +1.307 (drh 21-Aug-07): p->aVar[n].db = db; +1.307 (drh 21-Aug-07): } +1.307 (drh 21-Aug-07): for(n=0; naStack[n].db = db; +1.138 (drh 21-Aug-04): } +1.42 (danielk1 19-May-04): } +1.3 (drh 06-Sep-03): } +1.166 (danielk1 29-Jan-05): for(n=0; nnMem; n++){ +1.166 (danielk1 29-Jan-05): p->aMem[n].flags = MEM_Null; +1.307 (drh 21-Aug-07): p->aMem[n].db = db; +1.166 (danielk1 29-Jan-05): } +1.1 (drh 06-Sep-03): +1.10 (drh 31-Jan-04): p->pTos = &p->aStack[-1]; +1.85 (danielk1 31-May-04): p->pc = -1; +1.1 (drh 06-Sep-03): p->rc = SQLITE_OK; +1.1 (drh 06-Sep-03): p->uniqueCnt = 0; +1.1 (drh 06-Sep-03): p->returnDepth = 0; +1.1 (drh 06-Sep-03): p->errorAction = OE_Abort; +1.1 (drh 06-Sep-03): p->popStack = 0; +1.1 (drh 06-Sep-03): p->explain |= isExplain; +1.1 (drh 06-Sep-03): p->magic = VDBE_MAGIC_RUN; +1.121 (danielk1 21-Jun-04): p->nChange = 0; +1.219 (drh 07-Jan-06): p->cacheCtr = 1; +1.215 (drh 29-Dec-05): p->minWriteFileFormat = 255; +1.294 (danielk1 27-Jun-07): p->openedStatement = 0; +1.1 (drh 06-Sep-03): #ifdef VDBE_PROFILE +1.6 (drh 
31-Dec-03): { +1.6 (drh 31-Dec-03): int i; +1.6 (drh 31-Dec-03): for(i=0; inOp; i++){ +1.6 (drh 31-Dec-03): p->aOp[i].cnt = 0; +1.6 (drh 31-Dec-03): p->aOp[i].cycles = 0; +1.6 (drh 31-Dec-03): } +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): #endif +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): +1.1 (drh 06-Sep-03): /* +1.316 (drh 29-Aug-07): ** Close a VDBE cursor and release all the resources that cursor happens +1.1 (drh 06-Sep-03): ** to hold. +1.1 (drh 06-Sep-03): */ +1.252 (danielk1 23-Jun-06): void sqlite3VdbeFreeCursor(Vdbe *p, Cursor *pCx){ +1.106 (drh 12-Jun-04): if( pCx==0 ){ +1.106 (drh 12-Jun-04): return; +1.106 (drh 12-Jun-04): } +1.1 (drh 06-Sep-03): if( pCx->pCursor ){ +1.19 (danielk1 08-May-04): sqlite3BtreeCloseCursor(pCx->pCursor); +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): if( pCx->pBt ){ +1.19 (danielk1 08-May-04): sqlite3BtreeClose(pCx->pBt); +1.1 (drh 06-Sep-03): } +1.243 (drh 12-Jun-06): #ifndef SQLITE_OMIT_VIRTUALTABLE +1.243 (drh 12-Jun-06): if( pCx->pVtabCursor ){ +1.243 (drh 12-Jun-06): sqlite3_vtab_cursor *pVtabCursor = pCx->pVtabCursor; +1.252 (danielk1 23-Jun-06): const sqlite3_module *pModule = pCx->pModule; +1.252 (danielk1 23-Jun-06): p->inVtabMethod = 1; +1.258 (danielk1 25-Jul-06): sqlite3SafetyOff(p->db); +1.243 (drh 12-Jun-06): pModule->xClose(pVtabCursor); +1.258 (danielk1 25-Jul-06): sqlite3SafetyOn(p->db); +1.252 (danielk1 23-Jun-06): p->inVtabMethod = 0; +1.243 (drh 12-Jun-06): } +1.243 (drh 12-Jun-06): #endif +1.299 (drh 16-Aug-07): sqlite3_free(pCx->pData); +1.299 (drh 16-Aug-07): sqlite3_free(pCx->aType); +1.299 (drh 16-Aug-07): sqlite3_free(pCx); +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): +1.1 (drh 06-Sep-03): /* +1.316 (drh 29-Aug-07): ** Close all cursors except for VTab cursors that are currently +1.316 (drh 29-Aug-07): ** in use. +1.1 (drh 06-Sep-03): */ +1.316 (drh 29-Aug-07): static void closeAllCursorsExceptActiveVtabs(Vdbe *p){ +1.1 (drh 06-Sep-03): int i; +1.138 (drh 21-Aug-04): if( p->apCsr==0 ) return; +1.1 (drh 06-Sep-03): for(i=0; inCursor; i++){ +1.316 (drh 29-Aug-07): Cursor *pC = p->apCsr[i]; +1.316 (drh 29-Aug-07): if( pC && (!p->inVtabMethod || !pC->pVtabCursor) ){ +1.316 (drh 29-Aug-07): sqlite3VdbeFreeCursor(p, pC); +1.253 (danielk1 23-Jun-06): p->apCsr[i] = 0; +1.252 (danielk1 23-Jun-06): } +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): +1.1 (drh 06-Sep-03): /* +1.1 (drh 06-Sep-03): ** Clean up the VM after execution. +1.1 (drh 06-Sep-03): ** +1.1 (drh 06-Sep-03): ** This routine will automatically close any cursors, lists, and/or +1.1 (drh 06-Sep-03): ** sorters that were left open. It also deletes the values of +1.43 (drh 19-May-04): ** variables in the aVar[] array. 
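Editorial note: sqlite3VdbeMakeReady() above sizes one allocation for the stack, memory cells, bound variables and cursor array, then carves it into consecutive slices. A minimal standalone sketch of that carving idiom with simplified stand-in types (the 8-byte member keeps the later pointer slice aligned, much as the larger real Mem structure does):

    #include <stdlib.h>

    typedef struct Cell { long long v; int flags; } Cell;  /* stand-in for Mem    */
    typedef struct Cur  { int iCur; } Cur;                  /* stand-in for Cursor */

    typedef struct Machine {
      Cell *aStack, *aMem, *aVar;   /* three slices of the same allocation    */
      Cur **apCsr;                  /* fourth slice: array of cursor pointers */
    } Machine;

    /* One calloc sized for every array, then consecutive slices are handed
    ** out, just as the allocation above is split into aStack/aMem/aVar/apCsr. */
    static int makeReady(Machine *p, int nStack, int nMem, int nVar, int nCursor){
      p->aStack = calloc(1, nStack*sizeof(Cell)
                          + nMem*sizeof(Cell)
                          + nVar*sizeof(Cell)
                          + nCursor*sizeof(Cur*));
      if( p->aStack==0 ) return 1;          /* out of memory */
      p->aMem  = &p->aStack[nStack];
      p->aVar  = &p->aMem[nMem];
      p->apCsr = (Cur**)&p->aVar[nVar];
      return 0;
    }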
+1.1 (drh 06-Sep-03): */ +1.1 (drh 06-Sep-03): static void Cleanup(Vdbe *p){ +1.1 (drh 06-Sep-03): int i; +1.10 (drh 31-Jan-04): if( p->aStack ){ +1.146 (drh 24-Sep-04): releaseMemArray(p->aStack, 1 + (p->pTos - p->aStack)); +1.146 (drh 24-Sep-04): p->pTos = &p->aStack[-1]; +1.10 (drh 31-Jan-04): } +1.316 (drh 29-Aug-07): closeAllCursorsExceptActiveVtabs(p); +1.146 (drh 24-Sep-04): releaseMemArray(p->aMem, p->nMem); +1.183 (drh 08-Jul-05): sqlite3VdbeFifoClear(&p->sFifo); +1.146 (drh 24-Sep-04): if( p->contextStack ){ +1.146 (drh 24-Sep-04): for(i=0; icontextStackTop; i++){ +1.183 (drh 08-Jul-05): sqlite3VdbeFifoClear(&p->contextStack[i].sFifo); +1.146 (drh 24-Sep-04): } +1.299 (drh 16-Aug-07): sqlite3_free(p->contextStack); +1.143 (drh 19-Sep-04): } +1.17 (drh 21-Feb-04): p->contextStack = 0; +1.143 (drh 19-Sep-04): p->contextStackDepth = 0; +1.143 (drh 19-Sep-04): p->contextStackTop = 0; +1.299 (drh 16-Aug-07): sqlite3_free(p->zErrMsg); +1.1 (drh 06-Sep-03): p->zErrMsg = 0; +1.293 (drh 20-Jun-07): p->resOnStack = 0; +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): +1.1 (drh 06-Sep-03): /* +1.64 (danielk1 25-May-04): ** Set the number of result columns that will be returned by this SQL +1.64 (danielk1 25-May-04): ** statement. This is now set at compile time, rather than during +1.64 (danielk1 25-May-04): ** execution of the vdbe program so that sqlite3_column_count() can +1.64 (danielk1 25-May-04): ** be called on an SQL statement before sqlite3_step(). +1.64 (danielk1 25-May-04): */ +1.64 (danielk1 25-May-04): void sqlite3VdbeSetNumCols(Vdbe *p, int nResColumn){ +1.146 (drh 24-Sep-04): Mem *pColName; +1.146 (drh 24-Sep-04): int n; +1.308 (drh 23-Aug-07): +1.236 (danielk1 10-Feb-06): releaseMemArray(p->aColName, p->nResColumn*COLNAME_N); +1.299 (drh 16-Aug-07): sqlite3_free(p->aColName); +1.236 (danielk1 10-Feb-06): n = nResColumn*COLNAME_N; +1.64 (danielk1 25-May-04): p->nResColumn = nResColumn; +1.300 (danielk1 16-Aug-07): p->aColName = pColName = (Mem*)sqlite3DbMallocZero(p->db, sizeof(Mem)*n ); +1.146 (drh 24-Sep-04): if( p->aColName==0 ) return; +1.146 (drh 24-Sep-04): while( n-- > 0 ){ +1.308 (drh 23-Aug-07): pColName->flags = MEM_Null; +1.309 (drh 24-Aug-07): pColName->db = p->db; +1.308 (drh 23-Aug-07): pColName++; +1.146 (drh 24-Sep-04): } +1.66 (danielk1 26-May-04): } +1.66 (danielk1 26-May-04): +1.66 (danielk1 26-May-04): /* +1.66 (danielk1 26-May-04): ** Set the name of the idx'th column to be returned by the SQL statement. +1.66 (danielk1 26-May-04): ** zName must be a pointer to a nul terminated string. +1.66 (danielk1 26-May-04): ** +1.66 (danielk1 26-May-04): ** This call must be made after a call to sqlite3VdbeSetNumCols(). +1.66 (danielk1 26-May-04): ** +1.105 (danielk1 12-Jun-04): ** If N==P3_STATIC it means that zName is a pointer to a constant static +1.105 (danielk1 12-Jun-04): ** string and we can just copy the pointer. If it is P3_DYNAMIC, then +1.299 (drh 16-Aug-07): ** the string is freed using sqlite3_free() when the vdbe is finished with +1.105 (danielk1 12-Jun-04): ** it. Otherwise, N bytes of zName are copied. 
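Editorial note: the interface described above is what backs sqlite3_column_count() and sqlite3_column_name() in the public API; because sqlite3VdbeSetNumCols() runs at compile time, the names are available before the first sqlite3_step(). For example:

    #include <stdio.h>
    #include <sqlite3.h>

    /* Print the result-column names of a statement without stepping it. */
    static int printColumnNames(sqlite3 *db, const char *zSql){
      sqlite3_stmt *pStmt = 0;
      int rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
      if( rc!=SQLITE_OK ) return rc;
      int n = sqlite3_column_count(pStmt);   /* fixed at compile time, per the comment above */
      for(int i=0; i<n; i++){
        printf("column %d: %s\n", i, sqlite3_column_name(pStmt, i));
      }
      return sqlite3_finalize(pStmt);
    }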
+1.66 (danielk1 26-May-04): */ +1.236 (danielk1 10-Feb-06): int sqlite3VdbeSetColName(Vdbe *p, int idx, int var, const char *zName, int N){ +1.66 (danielk1 26-May-04): int rc; +1.66 (danielk1 26-May-04): Mem *pColName; +1.236 (danielk1 10-Feb-06): assert( idxnResColumn ); +1.236 (danielk1 10-Feb-06): assert( vardb->mallocFailed ) return SQLITE_NOMEM; +1.146 (drh 24-Sep-04): assert( p->aColName!=0 ); +1.236 (danielk1 10-Feb-06): pColName = &(p->aColName[idx+var*p->nResColumn]); +1.105 (danielk1 12-Jun-04): if( N==P3_DYNAMIC || N==P3_STATIC ){ +1.307 (drh 21-Aug-07): rc = sqlite3VdbeMemSetStr(pColName, zName, -1, SQLITE_UTF8, SQLITE_STATIC); +1.66 (danielk1 26-May-04): }else{ +1.307 (drh 21-Aug-07): rc = sqlite3VdbeMemSetStr(pColName, zName, N, SQLITE_UTF8,SQLITE_TRANSIENT); +1.66 (danielk1 26-May-04): } +1.66 (danielk1 26-May-04): if( rc==SQLITE_OK && N==P3_DYNAMIC ){ +1.66 (danielk1 26-May-04): pColName->flags = (pColName->flags&(~MEM_Static))|MEM_Dyn; +1.105 (danielk1 12-Jun-04): pColName->xDel = 0; +1.66 (danielk1 26-May-04): } +1.66 (danielk1 26-May-04): return rc; +1.64 (danielk1 25-May-04): } +1.64 (danielk1 25-May-04): +1.91 (danielk1 03-Jun-04): /* +1.91 (danielk1 03-Jun-04): ** A read or write transaction may or may not be active on database handle +1.91 (danielk1 03-Jun-04): ** db. If a transaction is active, commit it. If there is a +1.91 (danielk1 03-Jun-04): ** write-transaction spanning more than one database file, this routine +1.91 (danielk1 03-Jun-04): ** takes care of the master journal trickery. +1.91 (danielk1 03-Jun-04): */ +1.140 (drh 06-Sep-04): static int vdbeCommit(sqlite3 *db){ +1.91 (danielk1 03-Jun-04): int i; +1.91 (danielk1 03-Jun-04): int nTrans = 0; /* Number of databases with an active write-transaction */ +1.91 (danielk1 03-Jun-04): int rc = SQLITE_OK; +1.91 (danielk1 03-Jun-04): int needXcommit = 0; +1.91 (danielk1 03-Jun-04): +1.258 (danielk1 25-Jul-06): /* Before doing anything else, call the xSync() callback for any +1.258 (danielk1 25-Jul-06): ** virtual module tables written in this transaction. This has to +1.258 (danielk1 25-Jul-06): ** be done before determining whether a master journal file is +1.258 (danielk1 25-Jul-06): ** required, as an xSync() callback may add an attached database +1.258 (danielk1 25-Jul-06): ** to the transaction. +1.258 (danielk1 25-Jul-06): */ +1.258 (danielk1 25-Jul-06): rc = sqlite3VtabSync(db, rc); +1.258 (danielk1 25-Jul-06): if( rc!=SQLITE_OK ){ +1.258 (danielk1 25-Jul-06): return rc; +1.258 (danielk1 25-Jul-06): } +1.258 (danielk1 25-Jul-06): +1.258 (danielk1 25-Jul-06): /* This loop determines (a) if the commit hook should be invoked and +1.258 (danielk1 25-Jul-06): ** (b) how many database files have open write transactions, not +1.258 (danielk1 25-Jul-06): ** including the temp database. (b) is important because if more than +1.258 (danielk1 25-Jul-06): ** one database file has an open write transaction, a master journal +1.258 (danielk1 25-Jul-06): ** file is required for an atomic commit. 
+1.258 (danielk1 25-Jul-06): */ +1.91 (danielk1 03-Jun-04): for(i=0; inDb; i++){ +1.91 (danielk1 03-Jun-04): Btree *pBt = db->aDb[i].pBt; +1.313 (drh 28-Aug-07): if( sqlite3BtreeIsInTrans(pBt) ){ +1.91 (danielk1 03-Jun-04): needXcommit = 1; +1.91 (danielk1 03-Jun-04): if( i!=1 ) nTrans++; +1.91 (danielk1 03-Jun-04): } +1.91 (danielk1 03-Jun-04): } +1.91 (danielk1 03-Jun-04): +1.91 (danielk1 03-Jun-04): /* If there are any write-transactions at all, invoke the commit hook */ +1.91 (danielk1 03-Jun-04): if( needXcommit && db->xCommitCallback ){ +1.139 (drh 02-Sep-04): sqlite3SafetyOff(db); +1.139 (drh 02-Sep-04): rc = db->xCommitCallback(db->pCommitArg); +1.139 (drh 02-Sep-04): sqlite3SafetyOn(db); +1.139 (drh 02-Sep-04): if( rc ){ +1.91 (danielk1 03-Jun-04): return SQLITE_CONSTRAINT; +1.91 (danielk1 03-Jun-04): } +1.91 (danielk1 03-Jun-04): } +1.91 (danielk1 03-Jun-04): +1.128 (danielk1 26-Jun-04): /* The simple case - no more than one database file (not counting the +1.128 (danielk1 26-Jun-04): ** TEMP database) has a transaction active. There is no need for the +1.94 (drh 07-Jun-04): ** master-journal. +1.99 (drh 09-Jun-04): ** +1.128 (danielk1 26-Jun-04): ** If the return value of sqlite3BtreeGetFilename() is a zero length +1.128 (danielk1 26-Jun-04): ** string, it means the main database is :memory:. In that case we do +1.128 (danielk1 26-Jun-04): ** not support atomic multi-file commits, so use the simple case then +1.99 (drh 09-Jun-04): ** too. +1.91 (danielk1 03-Jun-04): */ +1.128 (danielk1 26-Jun-04): if( 0==strlen(sqlite3BtreeGetFilename(db->aDb[0].pBt)) || nTrans<=1 ){ +1.94 (drh 07-Jun-04): for(i=0; rc==SQLITE_OK && inDb; i++){ +1.91 (danielk1 03-Jun-04): Btree *pBt = db->aDb[i].pBt; +1.91 (danielk1 03-Jun-04): if( pBt ){ +1.277 (drh 30-Mar-07): rc = sqlite3BtreeCommitPhaseOne(pBt, 0); +1.94 (drh 07-Jun-04): } +1.94 (drh 07-Jun-04): } +1.94 (drh 07-Jun-04): +1.277 (drh 30-Mar-07): /* Do the commit only if all databases successfully complete phase 1. +1.277 (drh 30-Mar-07): ** If one of the BtreeCommitPhaseOne() calls fails, this indicates an +1.277 (drh 30-Mar-07): ** IO error while deleting or truncating a journal file. It is unlikely, +1.277 (drh 30-Mar-07): ** but could happen. In this case abandon processing and return the error. +1.274 (danielk1 27-Mar-07): */ +1.274 (danielk1 27-Mar-07): for(i=0; rc==SQLITE_OK && inDb; i++){ +1.274 (danielk1 27-Mar-07): Btree *pBt = db->aDb[i].pBt; +1.274 (danielk1 27-Mar-07): if( pBt ){ +1.277 (drh 30-Mar-07): rc = sqlite3BtreeCommitPhaseTwo(pBt); +1.274 (danielk1 27-Mar-07): } +1.274 (danielk1 27-Mar-07): } +1.94 (drh 07-Jun-04): if( rc==SQLITE_OK ){ +1.250 (danielk1 16-Jun-06): sqlite3VtabCommit(db); +1.91 (danielk1 03-Jun-04): } +1.91 (danielk1 03-Jun-04): } +1.91 (danielk1 03-Jun-04): +1.91 (danielk1 03-Jun-04): /* The complex case - There is a multi-file write-transaction active. +1.91 (danielk1 03-Jun-04): ** This requires a master journal file to ensure the transaction is +1.91 (danielk1 03-Jun-04): ** committed atomicly. 
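Editorial note: the xCommitCallback invocation above is the internal half of the public sqlite3_commit_hook() interface. A small example of the application side; as the code above shows, a non-zero return from the hook makes vdbeCommit() report SQLITE_CONSTRAINT and the transaction is rolled back:

    #include <sqlite3.h>

    /* Commit hook: a non-zero return asks SQLite to abandon the pending COMMIT. */
    static int commitHook(void *pArg){
      int *pVeto = (int*)pArg;      /* application flag registered below        */
      return *pVeto;                /* non-zero => the COMMIT becomes a rollback */
    }

    /* Typical registration, once per connection:
    **
    **   static int veto = 0;
    **   sqlite3_commit_hook(db, commitHook, &veto);
    */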
+1.91 (danielk1 03-Jun-04): */ +1.179 (danielk1 27-May-05): #ifndef SQLITE_OMIT_DISKIO +1.91 (danielk1 03-Jun-04): else{ +1.302 (danielk1 17-Aug-07): sqlite3_vfs *pVfs = db->pVfs; +1.188 (drh 27-Aug-05): int needSync = 0; +1.91 (danielk1 03-Jun-04): char *zMaster = 0; /* File-name for the master journal */ +1.91 (danielk1 03-Jun-04): char const *zMainFile = sqlite3BtreeGetFilename(db->aDb[0].pBt); +1.302 (danielk1 17-Aug-07): sqlite3_file *pMaster = 0; +1.298 (danielk1 15-Aug-07): i64 offset = 0; +1.91 (danielk1 03-Jun-04): +1.91 (danielk1 03-Jun-04): /* Select a master journal file name */ +1.91 (danielk1 03-Jun-04): do { +1.98 (drh 09-Jun-04): u32 random; +1.299 (drh 16-Aug-07): sqlite3_free(zMaster); +1.91 (danielk1 03-Jun-04): sqlite3Randomness(sizeof(random), &random); +1.300 (danielk1 16-Aug-07): zMaster = sqlite3MPrintf(db, "%s-mj%08X", zMainFile, random&0x7fffffff); +1.91 (danielk1 03-Jun-04): if( !zMaster ){ +1.91 (danielk1 03-Jun-04): return SQLITE_NOMEM; +1.91 (danielk1 03-Jun-04): } +1.302 (danielk1 17-Aug-07): }while( sqlite3OsAccess(pVfs, zMaster, SQLITE_ACCESS_EXISTS) ); +1.91 (danielk1 03-Jun-04): +1.91 (danielk1 03-Jun-04): /* Open the master journal. */ +1.303 (danielk1 18-Aug-07): rc = sqlite3OsOpenMalloc(pVfs, zMaster, &pMaster, +1.303 (danielk1 18-Aug-07): SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE| +1.305 (danielk1 20-Aug-07): SQLITE_OPEN_EXCLUSIVE|SQLITE_OPEN_MASTER_JOURNAL, 0 +1.303 (danielk1 18-Aug-07): ); +1.91 (danielk1 03-Jun-04): if( rc!=SQLITE_OK ){ +1.299 (drh 16-Aug-07): sqlite3_free(zMaster); +1.91 (danielk1 03-Jun-04): return rc; +1.91 (danielk1 03-Jun-04): } +1.91 (danielk1 03-Jun-04): +1.91 (danielk1 03-Jun-04): /* Write the name of each database file in the transaction into the new +1.91 (danielk1 03-Jun-04): ** master journal file. If an error occurs at this point close +1.91 (danielk1 03-Jun-04): ** and delete the master journal file. All the individual journal files +1.91 (danielk1 03-Jun-04): ** still have 'null' as the master journal pointer, so they will roll +1.157 (danielk1 13-Jan-05): ** back independently if a failure occurs. +1.91 (danielk1 03-Jun-04): */ +1.300 (danielk1 16-Aug-07): for(i=0; inDb; i++){ +1.91 (danielk1 03-Jun-04): Btree *pBt = db->aDb[i].pBt; +1.99 (drh 09-Jun-04): if( i==1 ) continue; /* Ignore the TEMP database */ +1.313 (drh 28-Aug-07): if( sqlite3BtreeIsInTrans(pBt) ){ +1.109 (danielk1 14-Jun-04): char const *zFile = sqlite3BtreeGetJournalname(pBt); +1.99 (drh 09-Jun-04): if( zFile[0]==0 ) continue; /* Ignore :memory: databases */ +1.188 (drh 27-Aug-05): if( !needSync && !sqlite3BtreeSyncDisabled(pBt) ){ +1.188 (drh 27-Aug-05): needSync = 1; +1.188 (drh 27-Aug-05): } +1.302 (danielk1 17-Aug-07): rc = sqlite3OsWrite(pMaster, zFile, strlen(zFile)+1, offset); +1.298 (danielk1 15-Aug-07): offset += strlen(zFile)+1; +1.91 (danielk1 03-Jun-04): if( rc!=SQLITE_OK ){ +1.303 (danielk1 18-Aug-07): sqlite3OsCloseFree(pMaster); +1.303 (danielk1 18-Aug-07): sqlite3OsDelete(pVfs, zMaster, 0); +1.299 (drh 16-Aug-07): sqlite3_free(zMaster); +1.91 (danielk1 03-Jun-04): return rc; +1.91 (danielk1 03-Jun-04): } +1.91 (danielk1 03-Jun-04): } +1.91 (danielk1 03-Jun-04): } +1.91 (danielk1 03-Jun-04): +1.310 (danielk1 24-Aug-07): /* Sync the master journal file. If the IOCAP_SEQUENTIAL device +1.310 (danielk1 24-Aug-07): ** flag is set this is not required. 
+1.310 (danielk1 24-Aug-07): */ +1.109 (danielk1 14-Jun-04): zMainFile = sqlite3BtreeGetDirname(db->aDb[0].pBt); +1.304 (danielk1 20-Aug-07): if( (needSync +1.310 (danielk1 24-Aug-07): && (0==(sqlite3OsDeviceCharacteristics(pMaster)&SQLITE_IOCAP_SEQUENTIAL)) +1.304 (danielk1 20-Aug-07): && (rc=sqlite3OsSync(pMaster, SQLITE_SYNC_NORMAL))!=SQLITE_OK) ){ +1.303 (danielk1 18-Aug-07): sqlite3OsCloseFree(pMaster); +1.303 (danielk1 18-Aug-07): sqlite3OsDelete(pVfs, zMaster, 0); +1.299 (drh 16-Aug-07): sqlite3_free(zMaster); +1.109 (danielk1 14-Jun-04): return rc; +1.109 (danielk1 14-Jun-04): } +1.91 (danielk1 03-Jun-04): +1.91 (danielk1 03-Jun-04): /* Sync all the db files involved in the transaction. The same call +1.91 (danielk1 03-Jun-04): ** sets the master journal pointer in each individual journal. If +1.91 (danielk1 03-Jun-04): ** an error occurs here, do not delete the master journal file. +1.91 (danielk1 03-Jun-04): ** +1.277 (drh 30-Mar-07): ** If the error occurs during the first call to +1.277 (drh 30-Mar-07): ** sqlite3BtreeCommitPhaseOne(), then there is a chance that the +1.277 (drh 30-Mar-07): ** master journal file will be orphaned. But we cannot delete it, +1.277 (drh 30-Mar-07): ** in case the master journal file name was written into the journal +1.277 (drh 30-Mar-07): ** file before the failure occured. +1.91 (danielk1 03-Jun-04): */ +1.258 (danielk1 25-Jul-06): for(i=0; rc==SQLITE_OK && inDb; i++){ +1.91 (danielk1 03-Jun-04): Btree *pBt = db->aDb[i].pBt; +1.313 (drh 28-Aug-07): if( pBt ){ +1.277 (drh 30-Mar-07): rc = sqlite3BtreeCommitPhaseOne(pBt, zMaster); +1.91 (danielk1 03-Jun-04): } +1.91 (danielk1 03-Jun-04): } +1.303 (danielk1 18-Aug-07): sqlite3OsCloseFree(pMaster); +1.258 (danielk1 25-Jul-06): if( rc!=SQLITE_OK ){ +1.299 (drh 16-Aug-07): sqlite3_free(zMaster); +1.258 (danielk1 25-Jul-06): return rc; +1.258 (danielk1 25-Jul-06): } +1.91 (danielk1 03-Jun-04): +1.110 (danielk1 14-Jun-04): /* Delete the master journal file. This commits the transaction. After +1.110 (danielk1 14-Jun-04): ** doing this the directory is synced again before any individual +1.110 (danielk1 14-Jun-04): ** transaction files are deleted. +1.110 (danielk1 14-Jun-04): */ +1.303 (danielk1 18-Aug-07): rc = sqlite3OsDelete(pVfs, zMaster, 1); +1.299 (drh 16-Aug-07): sqlite3_free(zMaster); +1.278 (drh 30-Mar-07): zMaster = 0; +1.263 (drh 13-Aug-06): if( rc ){ +1.263 (drh 13-Aug-06): return rc; +1.263 (drh 13-Aug-06): } +1.91 (danielk1 03-Jun-04): +1.91 (danielk1 03-Jun-04): /* All files and directories have already been synced, so the following +1.277 (drh 30-Mar-07): ** calls to sqlite3BtreeCommitPhaseTwo() are only closing files and +1.277 (drh 30-Mar-07): ** deleting or truncating journals. If something goes wrong while +1.277 (drh 30-Mar-07): ** this is happening we don't really care. The integrity of the +1.277 (drh 30-Mar-07): ** transaction is already guaranteed, but some stray 'cold' journals +1.277 (drh 30-Mar-07): ** may be lying around. Returning an error code won't help matters. 
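Editorial note: the sequence above is the heart of the atomic multi-file commit: record every participating journal in a master journal, sync it, let each database record the master's name (phase one), then delete the master journal as the single commit event. A deliberately tiny POSIX illustration of that ordering, using plain files and hypothetical names; it sketches the idea only and is not SQLite's I/O layer:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    /* Toy version of the ordering used by vdbeCommit() above:
    ** 1. create a master journal and record each participant in it,
    ** 2. sync it so the list survives a crash,
    ** 3. (each participant would now record the master's name and sync itself),
    ** 4. deleting the master journal is the single atomic "commit" event.     */
    static int toyMultiFileCommit(const char *zMaster,
                                  const char **azJournal, int nJournal){
      int fd = open(zMaster, O_CREAT|O_EXCL|O_RDWR, 0644);
      if( fd<0 ) return -1;
      for(int i=0; i<nJournal; i++){
        const char *z = azJournal[i];
        if( write(fd, z, strlen(z)+1)<0 ){ close(fd); unlink(zMaster); return -1; }
      }
      if( fsync(fd) ){ close(fd); unlink(zMaster); return -1; }
      close(fd);
      /* ... step 3 happens here in the real code (BtreeCommitPhaseOne) ... */
      return unlink(zMaster);       /* step 4: the transaction is now committed */
    }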
+1.91 (danielk1 03-Jun-04): */ +1.274 (danielk1 27-Mar-07): disable_simulated_io_errors(); +1.91 (danielk1 03-Jun-04): for(i=0; inDb; i++){ +1.91 (danielk1 03-Jun-04): Btree *pBt = db->aDb[i].pBt; +1.91 (danielk1 03-Jun-04): if( pBt ){ +1.277 (drh 30-Mar-07): sqlite3BtreeCommitPhaseTwo(pBt); +1.91 (danielk1 03-Jun-04): } +1.91 (danielk1 03-Jun-04): } +1.274 (danielk1 27-Mar-07): enable_simulated_io_errors(); +1.274 (danielk1 27-Mar-07): +1.250 (danielk1 16-Jun-06): sqlite3VtabCommit(db); +1.91 (danielk1 03-Jun-04): } +1.179 (danielk1 27-May-05): #endif +1.112 (danielk1 14-Jun-04): +1.94 (drh 07-Jun-04): return rc; +1.91 (danielk1 03-Jun-04): } +1.91 (danielk1 03-Jun-04): +1.85 (danielk1 31-May-04): /* +1.85 (danielk1 31-May-04): ** This routine checks that the sqlite3.activeVdbeCnt count variable +1.85 (danielk1 31-May-04): ** matches the number of vdbe's in the list sqlite3.pVdbe that are +1.85 (danielk1 31-May-04): ** currently active. An assertion fails if the two counts do not match. +1.139 (drh 02-Sep-04): ** This is an internal self-check only - it is not an essential processing +1.139 (drh 02-Sep-04): ** step. +1.85 (danielk1 31-May-04): ** +1.85 (danielk1 31-May-04): ** This is a no-op if NDEBUG is defined. +1.85 (danielk1 31-May-04): */ +1.85 (danielk1 31-May-04): #ifndef NDEBUG +1.140 (drh 06-Sep-04): static void checkActiveVdbeCnt(sqlite3 *db){ +1.85 (danielk1 31-May-04): Vdbe *p; +1.85 (danielk1 31-May-04): int cnt = 0; +1.85 (danielk1 31-May-04): p = db->pVdbe; +1.85 (danielk1 31-May-04): while( p ){ +1.139 (drh 02-Sep-04): if( p->magic==VDBE_MAGIC_RUN && p->pc>=0 ){ +1.85 (danielk1 31-May-04): cnt++; +1.85 (danielk1 31-May-04): } +1.85 (danielk1 31-May-04): p = p->pNext; +1.85 (danielk1 31-May-04): } +1.85 (danielk1 31-May-04): assert( cnt==db->activeVdbeCnt ); +1.85 (danielk1 31-May-04): } +1.85 (danielk1 31-May-04): #else +1.85 (danielk1 31-May-04): #define checkActiveVdbeCnt(x) +1.85 (danielk1 31-May-04): #endif +1.85 (danielk1 31-May-04): +1.64 (danielk1 25-May-04): /* +1.317 (drh 30-Aug-07): ** For every Btree that in database connection db which +1.317 (drh 30-Aug-07): ** has been modified, "trip" or invalidate each cursor in +1.317 (drh 30-Aug-07): ** that Btree might have been modified so that the cursor +1.317 (drh 30-Aug-07): ** can never be used again. This happens when a rollback +1.317 (drh 30-Aug-07): *** occurs. We have to trip all the other cursors, even +1.317 (drh 30-Aug-07): ** cursor from other VMs in different database connections, +1.317 (drh 30-Aug-07): ** so that none of them try to use the data at which they +1.317 (drh 30-Aug-07): ** were pointing and which now may have been changed due +1.317 (drh 30-Aug-07): ** to the rollback. +1.317 (drh 30-Aug-07): ** +1.317 (drh 30-Aug-07): ** Remember that a rollback can delete tables complete and +1.317 (drh 30-Aug-07): ** reorder rootpages. So it is not sufficient just to save +1.317 (drh 30-Aug-07): ** the state of the cursor. We have to invalidate the cursor +1.317 (drh 30-Aug-07): ** so that it is never used again. 
+1.317 (drh 30-Aug-07): */ +1.317 (drh 30-Aug-07): void invalidateCursorsOnModifiedBtrees(sqlite3 *db){ +1.317 (drh 30-Aug-07): int i; +1.317 (drh 30-Aug-07): for(i=0; inDb; i++){ +1.317 (drh 30-Aug-07): Btree *p = db->aDb[i].pBt; +1.317 (drh 30-Aug-07): if( p && sqlite3BtreeIsInTrans(p) ){ +1.317 (drh 30-Aug-07): sqlite3BtreeTripAllCursors(p, SQLITE_ABORT); +1.317 (drh 30-Aug-07): } +1.252 (danielk1 23-Jun-06): } +1.252 (danielk1 23-Jun-06): } +1.252 (danielk1 23-Jun-06): +1.252 (danielk1 23-Jun-06): /* +1.139 (drh 02-Sep-04): ** This routine is called the when a VDBE tries to halt. If the VDBE +1.139 (drh 02-Sep-04): ** has made changes and is in autocommit mode, then commit those +1.139 (drh 02-Sep-04): ** changes. If a rollback is needed, then do the rollback. +1.139 (drh 02-Sep-04): ** +1.139 (drh 02-Sep-04): ** This routine is the only way to move the state of a VM from +1.316 (drh 29-Aug-07): ** SQLITE_MAGIC_RUN to SQLITE_MAGIC_HALT. It is harmless to +1.316 (drh 29-Aug-07): ** call this on a VM that is in the SQLITE_MAGIC_HALT state. +1.1 (drh 06-Sep-03): ** +1.139 (drh 02-Sep-04): ** Return an error code. If the commit could not complete because of +1.139 (drh 02-Sep-04): ** lock contention, return SQLITE_BUSY. If SQLITE_BUSY is returned, it +1.139 (drh 02-Sep-04): ** means the close did not happen and needs to be repeated. +1.1 (drh 06-Sep-03): */ +1.316 (drh 29-Aug-07): int sqlite3VdbeHalt(Vdbe *p){ +1.140 (drh 06-Sep-04): sqlite3 *db = p->db; +1.1 (drh 06-Sep-03): int i; +1.85 (danielk1 31-May-04): int (*xFunc)(Btree *pBt) = 0; /* Function to call on each btree backend */ +1.228 (danielk1 20-Jan-06): int isSpecialError; /* Set to true if SQLITE_NOMEM or IOERR */ +1.228 (danielk1 20-Jan-06): +1.228 (danielk1 20-Jan-06): /* This function contains the logic that determines if a statement or +1.228 (danielk1 20-Jan-06): ** transaction will be committed or rolled back as a result of the +1.228 (danielk1 20-Jan-06): ** execution of this virtual machine. +1.228 (danielk1 20-Jan-06): ** +1.228 (danielk1 20-Jan-06): ** Special errors: +1.228 (danielk1 20-Jan-06): ** +1.228 (danielk1 20-Jan-06): ** If an SQLITE_NOMEM error has occured in a statement that writes to +1.228 (danielk1 20-Jan-06): ** the database, then either a statement or transaction must be rolled +1.228 (danielk1 20-Jan-06): ** back to ensure the tree-structures are in a consistent state. A +1.228 (danielk1 20-Jan-06): ** statement transaction is rolled back if one is open, otherwise the +1.228 (danielk1 20-Jan-06): ** entire transaction must be rolled back. +1.228 (danielk1 20-Jan-06): ** +1.228 (danielk1 20-Jan-06): ** If an SQLITE_IOERR error has occured in a statement that writes to +1.228 (danielk1 20-Jan-06): ** the database, then the entire transaction must be rolled back. The +1.228 (danielk1 20-Jan-06): ** I/O error may have caused garbage to be written to the journal +1.228 (danielk1 20-Jan-06): ** file. Were the transaction to continue and eventually be rolled +1.228 (danielk1 20-Jan-06): ** back that garbage might end up in the database file. +1.228 (danielk1 20-Jan-06): ** +1.228 (danielk1 20-Jan-06): ** In both of the above cases, the Vdbe.errorAction variable is +1.228 (danielk1 20-Jan-06): ** ignored. If the sqlite3.autoCommit flag is false and a transaction +1.228 (danielk1 20-Jan-06): ** is rolled back, it will be set to true. 
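Editorial note: the SQLITE_BUSY case described above (the commit could not obtain its locks, so the halt must be repeated) is what an application sees as SQLITE_BUSY from sqlite3_step() or sqlite3_exec(). The usual remedy at the public API level is a busy timeout; a minimal sketch:

    #include <stdio.h>
    #include <sqlite3.h>

    /* Run one SQL statement, letting SQLite retry blocked lock attempts for
    ** up to two seconds before SQLITE_BUSY is reported back to the caller.  */
    static int execWithBusyTimeout(sqlite3 *db, const char *zSql){
      char *zErr = 0;
      int rc;
      sqlite3_busy_timeout(db, 2000);            /* milliseconds */
      rc = sqlite3_exec(db, zSql, 0, 0, &zErr);
      if( rc==SQLITE_BUSY ){
        fprintf(stderr, "still busy after the timeout: %s\n",
                zErr ? zErr : "database is locked");
      }
      sqlite3_free(zErr);
      return rc;
    }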
+1.228 (danielk1 20-Jan-06): ** +1.228 (danielk1 20-Jan-06): ** Other errors: +1.228 (danielk1 20-Jan-06): ** +1.228 (danielk1 20-Jan-06): ** No error: +1.228 (danielk1 20-Jan-06): ** +1.228 (danielk1 20-Jan-06): */ +1.1 (drh 06-Sep-03): +1.299 (drh 16-Aug-07): if( p->db->mallocFailed ){ +1.208 (danielk1 06-Dec-05): p->rc = SQLITE_NOMEM; +1.208 (danielk1 06-Dec-05): } +1.316 (drh 29-Aug-07): closeAllCursorsExceptActiveVtabs(p); +1.139 (drh 02-Sep-04): if( p->magic!=VDBE_MAGIC_RUN ){ +1.139 (drh 02-Sep-04): return SQLITE_OK; +1.1 (drh 06-Sep-03): } +1.85 (danielk1 31-May-04): checkActiveVdbeCnt(db); +1.208 (danielk1 06-Dec-05): +1.228 (danielk1 20-Jan-06): /* No commit or rollback needed if the program never started */ +1.228 (danielk1 20-Jan-06): if( p->pc>=0 ){ +1.266 (drh 23-Sep-06): int mrc; /* Primary error code from p->rc */ +1.316 (drh 29-Aug-07): +1.316 (drh 29-Aug-07): /* Lock all btrees used by the statement */ +1.316 (drh 29-Aug-07): sqlite3BtreeMutexArrayEnter(&p->aMutex); +1.316 (drh 29-Aug-07): +1.228 (danielk1 20-Jan-06): /* Check for one of the special errors - SQLITE_NOMEM or SQLITE_IOERR */ +1.266 (drh 23-Sep-06): mrc = p->rc & 0xff; +1.291 (danielk1 13-Jun-07): isSpecialError = ( +1.291 (danielk1 13-Jun-07): (mrc==SQLITE_NOMEM || mrc==SQLITE_IOERR || mrc==SQLITE_INTERRUPT)?1:0); +1.228 (danielk1 20-Jan-06): if( isSpecialError ){ +1.208 (danielk1 06-Dec-05): /* This loop does static analysis of the query to see which of the +1.208 (danielk1 06-Dec-05): ** following three categories it falls into: +1.208 (danielk1 06-Dec-05): ** +1.208 (danielk1 06-Dec-05): ** Read-only +1.228 (danielk1 20-Jan-06): ** Query with statement journal +1.228 (danielk1 20-Jan-06): ** Query without statement journal +1.208 (danielk1 06-Dec-05): ** +1.208 (danielk1 06-Dec-05): ** We could do something more elegant than this static analysis (i.e. +1.208 (danielk1 06-Dec-05): ** store the type of query as part of the compliation phase), but +1.228 (danielk1 20-Jan-06): ** handling malloc() or IO failure is a fairly obscure edge case so +1.228 (danielk1 20-Jan-06): ** this is probably easier. Todo: Might be an opportunity to reduce +1.228 (danielk1 20-Jan-06): ** code size a very small amount though... +1.208 (danielk1 06-Dec-05): */ +1.208 (danielk1 06-Dec-05): int isReadOnly = 1; +1.208 (danielk1 06-Dec-05): int isStatement = 0; +1.208 (danielk1 06-Dec-05): assert(p->aOp || p->nOp==0); +1.208 (danielk1 06-Dec-05): for(i=0; inOp; i++){ +1.208 (danielk1 06-Dec-05): switch( p->aOp[i].opcode ){ +1.208 (danielk1 06-Dec-05): case OP_Transaction: +1.292 (danielk1 15-Jun-07): /* This is a bit strange. If we hit a malloc() or IO error and +1.292 (danielk1 15-Jun-07): ** the statement did not open a statement transaction, we will +1.292 (danielk1 15-Jun-07): ** rollback any active transaction and abort all other active +1.292 (danielk1 15-Jun-07): ** statements. Or, if this is an SQLITE_INTERRUPT error, we +1.292 (danielk1 15-Jun-07): ** will only rollback if the interrupted statement was a write. +1.292 (danielk1 15-Jun-07): ** +1.292 (danielk1 15-Jun-07): ** It could be argued that read-only statements should never +1.292 (danielk1 15-Jun-07): ** rollback anything. 
But careful analysis is required before +1.292 (danielk1 15-Jun-07): ** making this change +1.292 (danielk1 15-Jun-07): */ +1.292 (danielk1 15-Jun-07): if( p->aOp[i].p2 || mrc!=SQLITE_INTERRUPT ){ +1.292 (danielk1 15-Jun-07): isReadOnly = 0; +1.292 (danielk1 15-Jun-07): } +1.208 (danielk1 06-Dec-05): break; +1.208 (danielk1 06-Dec-05): case OP_Statement: +1.208 (danielk1 06-Dec-05): isStatement = 1; +1.208 (danielk1 06-Dec-05): break; +1.208 (danielk1 06-Dec-05): } +1.208 (danielk1 06-Dec-05): } +1.316 (drh 29-Aug-07): +1.316 (drh 29-Aug-07): +1.228 (danielk1 20-Jan-06): /* If the query was read-only, we need do no rollback at all. Otherwise, +1.228 (danielk1 20-Jan-06): ** proceed with the special handling. +1.228 (danielk1 20-Jan-06): */ +1.228 (danielk1 20-Jan-06): if( !isReadOnly ){ +1.290 (danielk1 13-Jun-07): if( p->rc==SQLITE_IOERR_BLOCKED && isStatement ){ +1.290 (danielk1 13-Jun-07): xFunc = sqlite3BtreeRollbackStmt; +1.290 (danielk1 13-Jun-07): p->rc = SQLITE_BUSY; +1.290 (danielk1 13-Jun-07): } else if( p->rc==SQLITE_NOMEM && isStatement ){ +1.228 (danielk1 20-Jan-06): xFunc = sqlite3BtreeRollbackStmt; +1.228 (danielk1 20-Jan-06): }else{ +1.228 (danielk1 20-Jan-06): /* We are forced to roll back the active transaction. Before doing +1.228 (danielk1 20-Jan-06): ** so, abort any other statements this handle currently has active. +1.228 (danielk1 20-Jan-06): */ +1.317 (drh 30-Aug-07): invalidateCursorsOnModifiedBtrees(db); +1.229 (danielk1 20-Jan-06): sqlite3RollbackAll(db); +1.228 (danielk1 20-Jan-06): db->autoCommit = 1; +1.228 (danielk1 20-Jan-06): } +1.208 (danielk1 06-Dec-05): } +1.208 (danielk1 06-Dec-05): } +1.228 (danielk1 20-Jan-06): +1.228 (danielk1 20-Jan-06): /* If the auto-commit flag is set and this is the only active vdbe, then +1.228 (danielk1 20-Jan-06): ** we do either a commit or rollback of the current transaction. +1.228 (danielk1 20-Jan-06): ** +1.228 (danielk1 20-Jan-06): ** Note: This block also runs if one of the special errors handled +1.228 (danielk1 20-Jan-06): ** above has occured. +1.228 (danielk1 20-Jan-06): */ +1.228 (danielk1 20-Jan-06): if( db->autoCommit && db->activeVdbeCnt==1 ){ +1.228 (danielk1 20-Jan-06): if( p->rc==SQLITE_OK || (p->errorAction==OE_Fail && !isSpecialError) ){ +1.296 (drh 07-Aug-07): /* The auto-commit flag is true, and the vdbe program was +1.228 (danielk1 20-Jan-06): ** successful or hit an 'OR FAIL' constraint. This means a commit +1.228 (danielk1 20-Jan-06): ** is required. 
+1.228 (danielk1 20-Jan-06): */ +1.228 (danielk1 20-Jan-06): int rc = vdbeCommit(db); +1.228 (danielk1 20-Jan-06): if( rc==SQLITE_BUSY ){ +1.316 (drh 29-Aug-07): sqlite3BtreeMutexArrayLeave(&p->aMutex); +1.228 (danielk1 20-Jan-06): return SQLITE_BUSY; +1.228 (danielk1 20-Jan-06): }else if( rc!=SQLITE_OK ){ +1.228 (danielk1 20-Jan-06): p->rc = rc; +1.229 (danielk1 20-Jan-06): sqlite3RollbackAll(db); +1.228 (danielk1 20-Jan-06): }else{ +1.228 (danielk1 20-Jan-06): sqlite3CommitInternalChanges(db); +1.228 (danielk1 20-Jan-06): } +1.228 (danielk1 20-Jan-06): }else{ +1.229 (danielk1 20-Jan-06): sqlite3RollbackAll(db); +1.228 (danielk1 20-Jan-06): } +1.228 (danielk1 20-Jan-06): }else if( !xFunc ){ +1.228 (danielk1 20-Jan-06): if( p->rc==SQLITE_OK || p->errorAction==OE_Fail ){ +1.294 (danielk1 27-Jun-07): if( p->openedStatement ){ +1.294 (danielk1 27-Jun-07): xFunc = sqlite3BtreeCommitStmt; +1.294 (danielk1 27-Jun-07): } +1.228 (danielk1 20-Jan-06): }else if( p->errorAction==OE_Abort ){ +1.228 (danielk1 20-Jan-06): xFunc = sqlite3BtreeRollbackStmt; +1.228 (danielk1 20-Jan-06): }else{ +1.317 (drh 30-Aug-07): invalidateCursorsOnModifiedBtrees(db); +1.229 (danielk1 20-Jan-06): sqlite3RollbackAll(db); +1.228 (danielk1 20-Jan-06): db->autoCommit = 1; +1.228 (danielk1 20-Jan-06): } +1.228 (danielk1 20-Jan-06): } +1.228 (danielk1 20-Jan-06): +1.228 (danielk1 20-Jan-06): /* If xFunc is not NULL, then it is one of sqlite3BtreeRollbackStmt or +1.228 (danielk1 20-Jan-06): ** sqlite3BtreeCommitStmt. Call it once on each backend. If an error occurs +1.228 (danielk1 20-Jan-06): ** and the return code is still SQLITE_OK, set the return code to the new +1.228 (danielk1 20-Jan-06): ** error value. +1.228 (danielk1 20-Jan-06): */ +1.228 (danielk1 20-Jan-06): assert(!xFunc || +1.228 (danielk1 20-Jan-06): xFunc==sqlite3BtreeCommitStmt || +1.228 (danielk1 20-Jan-06): xFunc==sqlite3BtreeRollbackStmt +1.228 (danielk1 20-Jan-06): ); +1.228 (danielk1 20-Jan-06): for(i=0; xFunc && inDb; i++){ +1.228 (danielk1 20-Jan-06): int rc; +1.228 (danielk1 20-Jan-06): Btree *pBt = db->aDb[i].pBt; +1.228 (danielk1 20-Jan-06): if( pBt ){ +1.228 (danielk1 20-Jan-06): rc = xFunc(pBt); +1.231 (danielk1 23-Jan-06): if( rc && (p->rc==SQLITE_OK || p->rc==SQLITE_CONSTRAINT) ){ +1.231 (danielk1 23-Jan-06): p->rc = rc; +1.231 (danielk1 23-Jan-06): sqlite3SetString(&p->zErrMsg, 0); +1.231 (danielk1 23-Jan-06): } +1.228 (danielk1 20-Jan-06): } +1.85 (danielk1 31-May-04): } +1.228 (danielk1 20-Jan-06): +1.228 (danielk1 20-Jan-06): /* If this was an INSERT, UPDATE or DELETE and the statement was committed, +1.228 (danielk1 20-Jan-06): ** set the change counter. +1.228 (danielk1 20-Jan-06): */ +1.228 (danielk1 20-Jan-06): if( p->changeCntOn && p->pc>=0 ){ +1.228 (danielk1 20-Jan-06): if( !xFunc || xFunc==sqlite3BtreeCommitStmt ){ +1.228 (danielk1 20-Jan-06): sqlite3VdbeSetChanges(db, p->nChange); +1.228 (danielk1 20-Jan-06): }else{ +1.228 (danielk1 20-Jan-06): sqlite3VdbeSetChanges(db, 0); +1.228 (danielk1 20-Jan-06): } +1.228 (danielk1 20-Jan-06): p->nChange = 0; +1.87 (danielk1 31-May-04): } +1.228 (danielk1 20-Jan-06): +1.228 (danielk1 20-Jan-06): /* Rollback or commit any schema changes that occurred. 
*/ +1.228 (danielk1 20-Jan-06): if( p->rc!=SQLITE_OK && db->flags&SQLITE_InternChanges ){ +1.228 (danielk1 20-Jan-06): sqlite3ResetInternalSchema(db, 0); +1.228 (danielk1 20-Jan-06): db->flags = (db->flags | SQLITE_InternChanges); +1.121 (danielk1 21-Jun-04): } +1.316 (drh 29-Aug-07): +1.316 (drh 29-Aug-07): /* Release the locks */ +1.316 (drh 29-Aug-07): sqlite3BtreeMutexArrayLeave(&p->aMutex); +1.1 (drh 06-Sep-03): } +1.85 (danielk1 31-May-04): +1.254 (danielk1 24-Jun-06): /* We have successfully halted and closed the VM. Record this fact. */ +1.254 (danielk1 24-Jun-06): if( p->pc>=0 ){ +1.85 (danielk1 31-May-04): db->activeVdbeCnt--; +1.1 (drh 06-Sep-03): } +1.139 (drh 02-Sep-04): p->magic = VDBE_MAGIC_HALT; +1.139 (drh 02-Sep-04): checkActiveVdbeCnt(db); +1.316 (drh 29-Aug-07): if( p->db->mallocFailed ){ +1.316 (drh 29-Aug-07): p->rc = SQLITE_NOMEM; +1.316 (drh 29-Aug-07): } +1.316 (drh 29-Aug-07): checkActiveVdbeCnt(db); +1.139 (drh 02-Sep-04): +1.139 (drh 02-Sep-04): return SQLITE_OK; +1.139 (drh 02-Sep-04): } +1.314 (drh 28-Aug-07): +1.139 (drh 02-Sep-04): +1.139 (drh 02-Sep-04): /* +1.270 (drh 09-Jan-07): ** Each VDBE holds the result of the most recent sqlite3_step() call +1.270 (drh 09-Jan-07): ** in p->rc. This routine sets that result back to SQLITE_OK. +1.270 (drh 09-Jan-07): */ +1.270 (drh 09-Jan-07): void sqlite3VdbeResetStepResult(Vdbe *p){ +1.270 (drh 09-Jan-07): p->rc = SQLITE_OK; +1.270 (drh 09-Jan-07): } +1.270 (drh 09-Jan-07): +1.270 (drh 09-Jan-07): /* +1.139 (drh 02-Sep-04): ** Clean up a VDBE after execution but do not delete the VDBE just yet. +1.139 (drh 02-Sep-04): ** Write any error messages into *pzErrMsg. Return the result code. +1.139 (drh 02-Sep-04): ** +1.139 (drh 02-Sep-04): ** After this routine is run, the VDBE should be ready to be executed +1.139 (drh 02-Sep-04): ** again. +1.139 (drh 02-Sep-04): ** +1.139 (drh 02-Sep-04): ** To look at it another way, this routine resets the state of the +1.139 (drh 02-Sep-04): ** virtual machine from VDBE_MAGIC_RUN or VDBE_MAGIC_HALT back to +1.139 (drh 02-Sep-04): ** VDBE_MAGIC_INIT. +1.139 (drh 02-Sep-04): */ +1.139 (drh 02-Sep-04): int sqlite3VdbeReset(Vdbe *p){ +1.265 (drh 15-Sep-06): sqlite3 *db; +1.265 (drh 15-Sep-06): db = p->db; +1.85 (danielk1 31-May-04): +1.139 (drh 02-Sep-04): /* If the VM did not run to completion or if it encountered an +1.139 (drh 02-Sep-04): ** error, then it might not have been halted properly. So halt +1.139 (drh 02-Sep-04): ** it now. +1.139 (drh 02-Sep-04): */ +1.265 (drh 15-Sep-06): sqlite3SafetyOn(db); +1.139 (drh 02-Sep-04): sqlite3VdbeHalt(p); +1.265 (drh 15-Sep-06): sqlite3SafetyOff(db); +1.139 (drh 02-Sep-04): +1.162 (drh 24-Jan-05): /* If the VDBE has be run even partially, then transfer the error code +1.162 (drh 24-Jan-05): ** and error message from the VDBE into the main database structure. But +1.162 (drh 24-Jan-05): ** if the VDBE has just been set to run but has not actually executed any +1.162 (drh 24-Jan-05): ** instructions yet, leave the main database error information unchanged. 
+1.139 (drh 02-Sep-04): */ +1.162 (drh 24-Jan-05): if( p->pc>=0 ){ +1.162 (drh 24-Jan-05): if( p->zErrMsg ){ +1.307 (drh 21-Aug-07): sqlite3ValueSetStr(db->pErr,-1,p->zErrMsg,SQLITE_UTF8,sqlite3_free); +1.229 (danielk1 20-Jan-06): db->errCode = p->rc; +1.162 (drh 24-Jan-05): p->zErrMsg = 0; +1.162 (drh 24-Jan-05): }else if( p->rc ){ +1.265 (drh 15-Sep-06): sqlite3Error(db, p->rc, 0); +1.162 (drh 24-Jan-05): }else{ +1.265 (drh 15-Sep-06): sqlite3Error(db, SQLITE_OK, 0); +1.162 (drh 24-Jan-05): } +1.163 (danielk1 24-Jan-05): }else if( p->rc && p->expired ){ +1.163 (danielk1 24-Jan-05): /* The expired flag was set on the VDBE before the first call +1.163 (danielk1 24-Jan-05): ** to sqlite3_step(). For consistency (since sqlite3_step() was +1.163 (danielk1 24-Jan-05): ** called), set the database error in this case as well. +1.163 (danielk1 24-Jan-05): */ +1.265 (drh 15-Sep-06): sqlite3Error(db, p->rc, 0); +1.139 (drh 02-Sep-04): } +1.139 (drh 02-Sep-04): +1.139 (drh 02-Sep-04): /* Reclaim all memory used by the VDBE +1.139 (drh 02-Sep-04): */ +1.139 (drh 02-Sep-04): Cleanup(p); +1.139 (drh 02-Sep-04): +1.139 (drh 02-Sep-04): /* Save profiling information from this VDBE run. +1.139 (drh 02-Sep-04): */ +1.208 (danielk1 06-Dec-05): assert( p->pTos<&p->aStack[p->pc<0?0:p->pc] || !p->aStack ); +1.1 (drh 06-Sep-03): #ifdef VDBE_PROFILE +1.1 (drh 06-Sep-03): { +1.1 (drh 06-Sep-03): FILE *out = fopen("vdbe_profile.out", "a"); +1.1 (drh 06-Sep-03): if( out ){ +1.1 (drh 06-Sep-03): int i; +1.1 (drh 06-Sep-03): fprintf(out, "---- "); +1.1 (drh 06-Sep-03): for(i=0; inOp; i++){ +1.1 (drh 06-Sep-03): fprintf(out, "%02x", p->aOp[i].opcode); +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): fprintf(out, "\n"); +1.1 (drh 06-Sep-03): for(i=0; inOp; i++){ +1.1 (drh 06-Sep-03): fprintf(out, "%6d %10lld %8lld ", +1.1 (drh 06-Sep-03): p->aOp[i].cnt, +1.1 (drh 06-Sep-03): p->aOp[i].cycles, +1.1 (drh 06-Sep-03): p->aOp[i].cnt>0 ? p->aOp[i].cycles/p->aOp[i].cnt : 0 +1.1 (drh 06-Sep-03): ); +1.19 (danielk1 08-May-04): sqlite3VdbePrintOp(out, i, &p->aOp[i]); +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): fclose(out); +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): #endif +1.1 (drh 06-Sep-03): p->magic = VDBE_MAGIC_INIT; +1.133 (drh 30-Jun-04): p->aborted = 0; +1.265 (drh 15-Sep-06): return p->rc & db->errMask; +1.1 (drh 06-Sep-03): } +1.139 (drh 02-Sep-04): +1.1 (drh 06-Sep-03): /* +1.1 (drh 06-Sep-03): ** Clean up and delete a VDBE after execution. Return an integer which is +1.1 (drh 06-Sep-03): ** the result code. Write any error message text into *pzErrMsg. +1.1 (drh 06-Sep-03): */ +1.122 (danielk1 21-Jun-04): int sqlite3VdbeFinalize(Vdbe *p){ +1.129 (danielk1 26-Jun-04): int rc = SQLITE_OK; +1.129 (danielk1 26-Jun-04): if( p->magic==VDBE_MAGIC_RUN || p->magic==VDBE_MAGIC_HALT ){ +1.129 (danielk1 26-Jun-04): rc = sqlite3VdbeReset(p); +1.265 (drh 15-Sep-06): assert( (rc & p->db->errMask)==rc ); +1.129 (danielk1 26-Jun-04): }else if( p->magic!=VDBE_MAGIC_INIT ){ +1.1 (drh 06-Sep-03): return SQLITE_MISUSE; +1.1 (drh 06-Sep-03): } +1.19 (danielk1 08-May-04): sqlite3VdbeDelete(p); +1.1 (drh 06-Sep-03): return rc; +1.42 (danielk1 19-May-04): } +1.1 (drh 06-Sep-03): +1.1 (drh 06-Sep-03): /* +1.120 (drh 19-Jun-04): ** Call the destructor for each auxdata entry in pVdbeFunc for which +1.123 (danielk1 21-Jun-04): ** the corresponding bit in mask is clear. Auxdata entries beyond 31 +1.120 (drh 19-Jun-04): ** are always destroyed. 
To destroy all auxdata entries, call this +1.123 (danielk1 21-Jun-04): ** routine with mask==0. +1.120 (drh 19-Jun-04): */ +1.120 (drh 19-Jun-04): void sqlite3VdbeDeleteAuxData(VdbeFunc *pVdbeFunc, int mask){ +1.120 (drh 19-Jun-04): int i; +1.120 (drh 19-Jun-04): for(i=0; inAux; i++){ +1.120 (drh 19-Jun-04): struct AuxData *pAux = &pVdbeFunc->apAux[i]; +1.120 (drh 19-Jun-04): if( (i>31 || !(mask&(1<pAux ){ +1.120 (drh 19-Jun-04): if( pAux->xDelete ){ +1.120 (drh 19-Jun-04): pAux->xDelete(pAux->pAux); +1.120 (drh 19-Jun-04): } +1.120 (drh 19-Jun-04): pAux->pAux = 0; +1.120 (drh 19-Jun-04): } +1.120 (drh 19-Jun-04): } +1.120 (drh 19-Jun-04): } +1.120 (drh 19-Jun-04): +1.120 (drh 19-Jun-04): /* +1.1 (drh 06-Sep-03): ** Delete an entire VDBE. +1.1 (drh 06-Sep-03): */ +1.19 (danielk1 08-May-04): void sqlite3VdbeDelete(Vdbe *p){ +1.1 (drh 06-Sep-03): int i; +1.1 (drh 06-Sep-03): if( p==0 ) return; +1.1 (drh 06-Sep-03): Cleanup(p); +1.1 (drh 06-Sep-03): if( p->pPrev ){ +1.1 (drh 06-Sep-03): p->pPrev->pNext = p->pNext; +1.1 (drh 06-Sep-03): }else{ +1.1 (drh 06-Sep-03): assert( p->db->pVdbe==p ); +1.1 (drh 06-Sep-03): p->db->pVdbe = p->pNext; +1.1 (drh 06-Sep-03): } +1.1 (drh 06-Sep-03): if( p->pNext ){ +1.1 (drh 06-Sep-03): p->pNext->pPrev = p->pPrev; +1.1 (drh 06-Sep-03): } +1.146 (drh 24-Sep-04): if( p->aOp ){ +1.146 (drh 24-Sep-04): for(i=0; inOp; i++){ +1.146 (drh 24-Sep-04): Op *pOp = &p->aOp[i]; +1.198 (drh 16-Sep-05): freeP3(pOp->p3type, pOp->p3); +1.92 (danielk1 05-Jun-04): } +1.299 (drh 16-Aug-07): sqlite3_free(p->aOp); +1.2 (drh 06-Sep-03): } +1.146 (drh 24-Sep-04): releaseMemArray(p->aVar, p->nVar); +1.299 (drh 16-Aug-07): sqlite3_free(p->aLabel); +1.299 (drh 16-Aug-07): sqlite3_free(p->aStack); +1.236 (danielk1 10-Feb-06): releaseMemArray(p->aColName, p->nResColumn*COLNAME_N); +1.299 (drh 16-Aug-07): sqlite3_free(p->aColName); +1.299 (drh 16-Aug-07): sqlite3_free(p->zSql); +1.1 (drh 06-Sep-03): p->magic = VDBE_MAGIC_DEAD; +1.299 (drh 16-Aug-07): sqlite3_free(p); +1.1 (drh 06-Sep-03): } +1.7 (drh 07-Jan-04): +1.7 (drh 07-Jan-04): /* +1.7 (drh 07-Jan-04): ** If a MoveTo operation is pending on the given cursor, then do that +1.7 (drh 07-Jan-04): ** MoveTo now. Return an error code. If no MoveTo is pending, this +1.7 (drh 07-Jan-04): ** routine does nothing and returns SQLITE_OK. 
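/* Editor's aside, not part of the patch: the survival rule applied by
** sqlite3VdbeDeleteAuxData() above, restated as a standalone predicate.
** auxDataSurvives() is our name, not SQLite's; only slots 0..31 can be
** protected by a bit in the mask, so anything beyond slot 31 is always
** destroyed.  A minimal, self-contained sketch follows. */
#include <assert.h>

static int auxDataSurvives(int i, unsigned int mask){
  return i<=31 && (mask & (1u<<i))!=0;
}

int main(void){
  assert(  auxDataSurvives(0, 0x1u) );   /* bit 0 set: keep the entry    */
  assert( !auxDataSurvives(3, 0x1u) );   /* bit 3 clear: destroy it      */
  assert( !auxDataSurvives(40, ~0u) );   /* beyond 31: always destroyed  */
  assert( !auxDataSurvives(5, 0u) );     /* mask==0 destroys everything  */
  return 0;
}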
+1.7 (drh 07-Jan-04): */ +1.19 (danielk1 08-May-04): int sqlite3VdbeCursorMoveto(Cursor *p){ +1.7 (drh 07-Jan-04): if( p->deferredMoveto ){ +1.165 (drh 26-Jan-05): int res, rc; +1.264 (adamd 14-Sep-06): #ifdef SQLITE_TEST +1.24 (danielk1 10-May-04): extern int sqlite3_search_count; +1.264 (adamd 14-Sep-06): #endif +1.181 (drh 12-Jun-05): assert( p->isTable ); +1.280 (drh 04-Apr-07): rc = sqlite3BtreeMoveto(p->pCursor, 0, p->movetoTarget, 0, &res); +1.165 (drh 26-Jan-05): if( rc ) return rc; +1.50 (drh 20-May-04): *p->pIncrKey = 0; +1.181 (drh 12-Jun-05): p->lastRowid = keyToInt(p->movetoTarget); +1.181 (drh 12-Jun-05): p->rowidIsValid = res==0; +1.7 (drh 07-Jan-04): if( res<0 ){ +1.165 (drh 26-Jan-05): rc = sqlite3BtreeNext(p->pCursor, &res); +1.165 (drh 26-Jan-05): if( rc ) return rc; +1.7 (drh 07-Jan-04): } +1.262 (drh 08-Aug-06): #ifdef SQLITE_TEST +1.24 (danielk1 10-May-04): sqlite3_search_count++; +1.262 (drh 08-Aug-06): #endif +1.7 (drh 07-Jan-04): p->deferredMoveto = 0; +1.219 (drh 07-Jan-06): p->cacheStatus = CACHE_STALE; +1.7 (drh 07-Jan-04): } +1.7 (drh 07-Jan-04): return SQLITE_OK; +1.7 (drh 07-Jan-04): } +1.19 (danielk1 08-May-04): +1.20 (drh 08-May-04): /* +1.28 (danielk1 12-May-04): ** The following functions: +1.23 (danielk1 10-May-04): ** +1.28 (danielk1 12-May-04): ** sqlite3VdbeSerialType() +1.28 (danielk1 12-May-04): ** sqlite3VdbeSerialTypeLen() +1.28 (danielk1 12-May-04): ** sqlite3VdbeSerialRead() +1.23 (danielk1 10-May-04): ** sqlite3VdbeSerialLen() +1.28 (danielk1 12-May-04): ** sqlite3VdbeSerialWrite() +1.23 (danielk1 10-May-04): ** +1.23 (danielk1 10-May-04): ** encapsulate the code that serializes values for storage in SQLite +1.28 (danielk1 12-May-04): ** data and index records. Each serialized value consists of a +1.28 (danielk1 12-May-04): ** 'serial-type' and a blob of data. The serial type is an 8-byte unsigned +1.28 (danielk1 12-May-04): ** integer, stored as a varint. +1.23 (danielk1 10-May-04): ** +1.28 (danielk1 12-May-04): ** In an SQLite index record, the serial type is stored directly before +1.28 (danielk1 12-May-04): ** the blob of data that it corresponds to. In a table record, all serial +1.28 (danielk1 12-May-04): ** types are stored at the start of the record, and the blobs of data at +1.28 (danielk1 12-May-04): ** the end. Hence these functions allow the caller to handle the +1.28 (danielk1 12-May-04): ** serial-type and data blob seperately. +1.28 (danielk1 12-May-04): ** +1.28 (danielk1 12-May-04): ** The following table describes the various storage classes for data: +1.28 (danielk1 12-May-04): ** +1.28 (danielk1 12-May-04): ** serial type bytes of data type +1.23 (danielk1 10-May-04): ** -------------- --------------- --------------- +1.84 (drh 30-May-04): ** 0 0 NULL +1.23 (danielk1 10-May-04): ** 1 1 signed integer +1.23 (danielk1 10-May-04): ** 2 2 signed integer +1.84 (drh 30-May-04): ** 3 3 signed integer +1.84 (drh 30-May-04): ** 4 4 signed integer +1.84 (drh 30-May-04): ** 5 6 signed integer +1.84 (drh 30-May-04): ** 6 8 signed integer +1.84 (drh 30-May-04): ** 7 8 IEEE float +1.215 (drh 29-Dec-05): ** 8 0 Integer constant 0 +1.215 (drh 29-Dec-05): ** 9 0 Integer constant 1 +1.215 (drh 29-Dec-05): ** 10,11 reserved for expansion +1.23 (danielk1 10-May-04): ** N>=12 and even (N-12)/2 BLOB +1.23 (danielk1 10-May-04): ** N>=13 and odd (N-13)/2 text +1.23 (danielk1 10-May-04): ** +1.217 (drh 02-Jan-06): ** The 8 and 9 types were added in 3.3.0, file format 4. 
Prior versions +1.217 (drh 02-Jan-06): ** of SQLite will not understand those serial types. +1.23 (danielk1 10-May-04): */ +1.23 (danielk1 10-May-04): +1.23 (danielk1 10-May-04): /* +1.28 (danielk1 12-May-04): ** Return the serial-type for the value stored in pMem. +1.22 (danielk1 10-May-04): */ +1.215 (drh 29-Dec-05): u32 sqlite3VdbeSerialType(Mem *pMem, int file_format){ +1.28 (danielk1 12-May-04): int flags = pMem->flags; +1.283 (drh 02-May-07): int n; +1.28 (danielk1 12-May-04): +1.28 (danielk1 12-May-04): if( flags&MEM_Null ){ +1.84 (drh 30-May-04): return 0; +1.23 (danielk1 10-May-04): } +1.28 (danielk1 12-May-04): if( flags&MEM_Int ){ +1.158 (drh 20-Jan-05): /* Figure out whether to use 1, 2, 4, 6 or 8 bytes. */ +1.175 (drh 15-Apr-05): # define MAX_6BYTE ((((i64)0x00001000)<<32)-1) +1.276 (drh 30-Mar-07): i64 i = pMem->u.i; +1.215 (drh 29-Dec-05): u64 u; +1.215 (drh 29-Dec-05): if( file_format>=4 && (i&1)==i ){ +1.215 (drh 29-Dec-05): return 8+i; +1.215 (drh 29-Dec-05): } +1.215 (drh 29-Dec-05): u = i<0 ? -i : i; +1.164 (drh 26-Jan-05): if( u<=127 ) return 1; +1.164 (drh 26-Jan-05): if( u<=32767 ) return 2; +1.164 (drh 26-Jan-05): if( u<=8388607 ) return 3; +1.164 (drh 26-Jan-05): if( u<=2147483647 ) return 4; +1.164 (drh 26-Jan-05): if( u<=MAX_6BYTE ) return 5; +1.84 (drh 30-May-04): return 6; +1.28 (danielk1 12-May-04): } +1.28 (danielk1 12-May-04): if( flags&MEM_Real ){ +1.84 (drh 30-May-04): return 7; +1.23 (danielk1 10-May-04): } +1.283 (drh 02-May-07): assert( flags&(MEM_Str|MEM_Blob) ); +1.283 (drh 02-May-07): n = pMem->n; +1.283 (drh 02-May-07): if( flags & MEM_Zero ){ +1.283 (drh 02-May-07): n += pMem->u.i; +1.23 (danielk1 10-May-04): } +1.283 (drh 02-May-07): assert( n>=0 ); +1.283 (drh 02-May-07): return ((n*2) + 12 + ((flags&MEM_Str)!=0)); +1.22 (danielk1 10-May-04): } +1.22 (danielk1 10-May-04): +1.22 (danielk1 10-May-04): /* +1.28 (danielk1 12-May-04): ** Return the length of the data corresponding to the supplied serial-type. +1.22 (danielk1 10-May-04): */ +1.75 (drh 28-May-04): int sqlite3VdbeSerialTypeLen(u32 serial_type){ +1.84 (drh 30-May-04): if( serial_type>=12 ){ +1.79 (drh 28-May-04): return (serial_type-12)/2; +1.79 (drh 28-May-04): }else{ +1.150 (drh 06-Oct-04): static const u8 aSize[] = { 0, 1, 2, 3, 4, 6, 8, 8, 0, 0, 0, 0 }; +1.79 (drh 28-May-04): return aSize[serial_type]; +1.79 (drh 28-May-04): } +1.28 (danielk1 12-May-04): } +1.23 (danielk1 10-May-04): +1.28 (danielk1 12-May-04): /* +1.284 (drh 04-May-07): ** If we are on an architecture with mixed-endian floating +1.289 (drh 23-May-07): ** points (ex: ARM7) then swap the lower 4 bytes with the +1.284 (drh 04-May-07): ** upper 4 bytes. Return the result. +1.284 (drh 04-May-07): ** +1.289 (drh 23-May-07): ** For most architectures, this is a no-op. +1.289 (drh 23-May-07): ** +1.289 (drh 23-May-07): ** (later): It is reported to me that the mixed-endian problem +1.289 (drh 23-May-07): ** on ARM7 is an issue with GCC, not with the ARM7 chip. It seems +1.289 (drh 23-May-07): ** that early versions of GCC stored the two words of a 64-bit +1.289 (drh 23-May-07): ** float in the wrong order. And that error has been propagated +1.289 (drh 23-May-07): ** ever since. The blame is not necessarily with GCC, though. +1.289 (drh 23-May-07): ** GCC might have just copying the problem from a prior compiler. +1.289 (drh 23-May-07): ** I am also told that newer versions of GCC that follow a different +1.289 (drh 23-May-07): ** ABI get the byte order right. 
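/* Editor's aside, not part of the patch: a standalone illustration of the
** serial-type table documented above.  serialTypeLen() mirrors the logic of
** sqlite3VdbeSerialTypeLen(); the name and the worked values below are ours. */
#include <assert.h>

typedef unsigned char u8;
typedef unsigned int u32;

static u32 serialTypeLen(u32 serial_type){
  static const u8 aSize[] = { 0, 1, 2, 3, 4, 6, 8, 8, 0, 0, 0, 0 };
  if( serial_type>=12 ){
    return (serial_type-12)/2;       /* even = BLOB, odd = text */
  }
  return aSize[serial_type];
}

int main(void){
  assert( serialTypeLen(0)==0 );        /* NULL carries no data bytes       */
  assert( serialTypeLen(6)==8 );        /* 8-byte signed integer            */
  assert( serialTypeLen(7)==8 );        /* IEEE float                       */
  assert( serialTypeLen(8)==0 );        /* literal 0, file format >= 4      */
  assert( serialTypeLen(13+2*5)==5 );   /* 5-byte text  => serial type 23   */
  assert( serialTypeLen(12+2*5)==5 );   /* 5-byte BLOB  => serial type 22   */
  return 0;
}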
+1.289 (drh 23-May-07): ** +1.289 (drh 23-May-07): ** Developers using SQLite on an ARM7 should compile and run their +1.289 (drh 23-May-07): ** application using -DSQLITE_DEBUG=1 at least once. With DEBUG +1.289 (drh 23-May-07): ** enabled, some asserts below will ensure that the byte order of +1.289 (drh 23-May-07): ** floating point values is correct. +1.318 (drh 30-Aug-07): ** +1.318 (drh 30-Aug-07): ** (2007-08-30) Frank van Vugt has studied this problem closely +1.318 (drh 30-Aug-07): ** and has send his findings to the SQLite developers. Frank +1.318 (drh 30-Aug-07): ** writes that some Linux kernels offer floating point hardware +1.318 (drh 30-Aug-07): ** emulation that uses only 32-bit mantissas instead of a full +1.318 (drh 30-Aug-07): ** 48-bits as required by the IEEE standard. (This is the +1.318 (drh 30-Aug-07): ** CONFIG_FPE_FASTFPE option.) On such systems, floating point +1.318 (drh 30-Aug-07): ** byte swapping becomes very complicated. To avoid problems, +1.318 (drh 30-Aug-07): ** the necessary byte swapping is carried out using a 64-bit integer +1.318 (drh 30-Aug-07): ** rather than a 64-bit float. Frank assures us that the code here +1.318 (drh 30-Aug-07): ** works for him. We, the developers, have no way to independently +1.318 (drh 30-Aug-07): ** verify this, but Frank seems to know what he is talking about +1.318 (drh 30-Aug-07): ** so we trust him. +1.284 (drh 04-May-07): */ +1.284 (drh 04-May-07): #ifdef SQLITE_MIXED_ENDIAN_64BIT_FLOAT +1.318 (drh 30-Aug-07): static u64 floatSwap(u64 in){ +1.284 (drh 04-May-07): union { +1.318 (drh 30-Aug-07): u64 r; +1.284 (drh 04-May-07): u32 i[2]; +1.284 (drh 04-May-07): } u; +1.284 (drh 04-May-07): u32 t; +1.284 (drh 04-May-07): +1.284 (drh 04-May-07): u.r = in; +1.284 (drh 04-May-07): t = u.i[0]; +1.284 (drh 04-May-07): u.i[0] = u.i[1]; +1.284 (drh 04-May-07): u.i[1] = t; +1.284 (drh 04-May-07): return u.r; +1.284 (drh 04-May-07): } +1.284 (drh 04-May-07): # define swapMixedEndianFloat(X) X = floatSwap(X) +1.284 (drh 04-May-07): #else +1.284 (drh 04-May-07): # define swapMixedEndianFloat(X) +1.284 (drh 04-May-07): #endif +1.284 (drh 04-May-07): +1.284 (drh 04-May-07): /* +1.28 (danielk1 12-May-04): ** Write the serialized data blob for the value stored in pMem into +1.28 (danielk1 12-May-04): ** buf. It is assumed that the caller has allocated sufficient space. +1.28 (danielk1 12-May-04): ** Return the number of bytes written. +1.283 (drh 02-May-07): ** +1.283 (drh 02-May-07): ** nBuf is the amount of space left in buf[]. nBuf must always be +1.283 (drh 02-May-07): ** large enough to hold the entire field. Except, if the field is +1.283 (drh 02-May-07): ** a blob with a zero-filled tail, then buf[] might be just the right +1.283 (drh 02-May-07): ** size to hold everything except for the zero-filled tail. If buf[] +1.283 (drh 02-May-07): ** is only big enough to hold the non-zero prefix, then only write that +1.283 (drh 02-May-07): ** prefix into buf[]. But if buf[] is large enough to hold both the +1.283 (drh 02-May-07): ** prefix and the tail then write the prefix and set the tail to all +1.283 (drh 02-May-07): ** zeros. +1.283 (drh 02-May-07): ** +1.283 (drh 02-May-07): ** Return the number of bytes actually written into buf[]. The number +1.283 (drh 02-May-07): ** of bytes in the zero-filled tail is included in the return value only +1.283 (drh 02-May-07): ** if those bytes were zeroed in buf[]. 
+1.28 (danielk1 12-May-04): */ +1.283 (drh 02-May-07): int sqlite3VdbeSerialPut(u8 *buf, int nBuf, Mem *pMem, int file_format){ +1.215 (drh 29-Dec-05): u32 serial_type = sqlite3VdbeSerialType(pMem, file_format); +1.28 (danielk1 12-May-04): int len; +1.30 (danielk1 13-May-04): +1.54 (drh 21-May-04): /* Integer and Real */ +1.215 (drh 29-Dec-05): if( serial_type<=7 && serial_type>0 ){ +1.54 (drh 21-May-04): u64 v; +1.54 (drh 21-May-04): int i; +1.84 (drh 30-May-04): if( serial_type==7 ){ +1.272 (drh 26-Mar-07): assert( sizeof(v)==sizeof(pMem->r) ); +1.272 (drh 26-Mar-07): memcpy(&v, &pMem->r, sizeof(v)); +1.318 (drh 30-Aug-07): swapMixedEndianFloat(v); +1.54 (drh 21-May-04): }else{ +1.276 (drh 30-Mar-07): v = pMem->u.i; +1.54 (drh 21-May-04): } +1.54 (drh 21-May-04): len = i = sqlite3VdbeSerialTypeLen(serial_type); +1.283 (drh 02-May-07): assert( len<=nBuf ); +1.54 (drh 21-May-04): while( i-- ){ +1.54 (drh 21-May-04): buf[i] = (v&0xFF); +1.54 (drh 21-May-04): v >>= 8; +1.54 (drh 21-May-04): } +1.54 (drh 21-May-04): return len; +1.28 (danielk1 12-May-04): } +1.215 (drh 29-Dec-05): +1.28 (danielk1 12-May-04): /* String or blob */ +1.215 (drh 29-Dec-05): if( serial_type>=12 ){ +1.283 (drh 02-May-07): assert( pMem->n + ((pMem->flags & MEM_Zero)?pMem->u.i:0) +1.283 (drh 02-May-07): == sqlite3VdbeSerialTypeLen(serial_type) ); +1.283 (drh 02-May-07): assert( pMem->n<=nBuf ); +1.283 (drh 02-May-07): len = pMem->n; +1.215 (drh 29-Dec-05): memcpy(buf, pMem->z, len); +1.283 (drh 02-May-07): if( pMem->flags & MEM_Zero ){ +1.283 (drh 02-May-07): len += pMem->u.i; +1.283 (drh 02-May-07): if( len>nBuf ){ +1.283 (drh 02-May-07): len = nBuf; +1.283 (drh 02-May-07): } +1.283 (drh 02-May-07): memset(&buf[pMem->n], 0, len-pMem->n); +1.283 (drh 02-May-07): } +1.215 (drh 29-Dec-05): return len; +1.215 (drh 29-Dec-05): } +1.215 (drh 29-Dec-05): +1.215 (drh 29-Dec-05): /* NULL or constants 0 or 1 */ +1.215 (drh 29-Dec-05): return 0; +1.22 (danielk1 10-May-04): } +1.22 (danielk1 10-May-04): +1.22 (danielk1 10-May-04): /* +1.28 (danielk1 12-May-04): ** Deserialize the data blob pointed to by buf as serial type serial_type +1.28 (danielk1 12-May-04): ** and store the result in pMem. Return the number of bytes read. 
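/* Editor's aside, not part of the patch: a round trip through the big-endian,
** minimal-width integer encoding that sqlite3VdbeSerialPut() above uses for
** serial types 1..6.  putBigEndian() and getSigned32() are our names; the
** decode mirrors what the deserializer does for a 4-byte integer (serial
** type 4), written here without relying on 32-bit int overflow. */
#include <assert.h>

typedef unsigned char u8;
typedef long long i64;
typedef unsigned long long u64;

static int putBigEndian(u8 *buf, i64 value, int len){
  u64 v = (u64)value;            /* like the original: the loop works unsigned */
  int i = len;
  while( i-- ){                  /* most significant byte lands in buf[0] */
    buf[i] = (u8)(v & 0xFF);
    v >>= 8;
  }
  return len;
}

static i64 getSigned32(const u8 *buf){
  u64 u = ((u64)buf[0]<<24) | ((u64)buf[1]<<16) | ((u64)buf[2]<<8) | buf[3];
  /* undo the two's-complement wrap for negative 32-bit values */
  return (u & 0x80000000u) ? (i64)u - 0x100000000LL : (i64)u;
}

int main(void){
  u8 buf[4];
  putBigEndian(buf, -2, 4);
  assert( buf[0]==0xFF && buf[1]==0xFF && buf[2]==0xFF && buf[3]==0xFE );
  assert( getSigned32(buf)==-2 );
  putBigEndian(buf, 300, 4);
  assert( getSigned32(buf)==300 );
  return 0;
}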
+1.28 (danielk1 12-May-04): */ +1.55 (danielk1 22-May-04): int sqlite3VdbeSerialGet( +1.58 (danielk1 23-May-04): const unsigned char *buf, /* Buffer to deserialize from */ +1.75 (drh 28-May-04): u32 serial_type, /* Serial type to deserialize */ +1.75 (drh 28-May-04): Mem *pMem /* Memory cell to write value into */ +1.55 (danielk1 22-May-04): ){ +1.177 (drh 21-May-05): switch( serial_type ){ +1.177 (drh 21-May-05): case 10: /* Reserved for future use */ +1.177 (drh 21-May-05): case 11: /* Reserved for future use */ +1.177 (drh 21-May-05): case 0: { /* NULL */ +1.177 (drh 21-May-05): pMem->flags = MEM_Null; +1.177 (drh 21-May-05): break; +1.177 (drh 21-May-05): } +1.177 (drh 21-May-05): case 1: { /* 1-byte signed integer */ +1.276 (drh 30-Mar-07): pMem->u.i = (signed char)buf[0]; +1.177 (drh 21-May-05): pMem->flags = MEM_Int; +1.177 (drh 21-May-05): return 1; +1.177 (drh 21-May-05): } +1.177 (drh 21-May-05): case 2: { /* 2-byte signed integer */ +1.276 (drh 30-Mar-07): pMem->u.i = (((signed char)buf[0])<<8) | buf[1]; +1.177 (drh 21-May-05): pMem->flags = MEM_Int; +1.177 (drh 21-May-05): return 2; +1.177 (drh 21-May-05): } +1.177 (drh 21-May-05): case 3: { /* 3-byte signed integer */ +1.276 (drh 30-Mar-07): pMem->u.i = (((signed char)buf[0])<<16) | (buf[1]<<8) | buf[2]; +1.177 (drh 21-May-05): pMem->flags = MEM_Int; +1.177 (drh 21-May-05): return 3; +1.177 (drh 21-May-05): } +1.177 (drh 21-May-05): case 4: { /* 4-byte signed integer */ +1.276 (drh 30-Mar-07): pMem->u.i = (buf[0]<<24) | (buf[1]<<16) | (buf[2]<<8) | buf[3]; +1.177 (drh 21-May-05): pMem->flags = MEM_Int; +1.177 (drh 21-May-05): return 4; +1.177 (drh 21-May-05): } +1.177 (drh 21-May-05): case 5: { /* 6-byte signed integer */ +1.177 (drh 21-May-05): u64 x = (((signed char)buf[0])<<8) | buf[1]; +1.177 (drh 21-May-05): u32 y = (buf[2]<<24) | (buf[3]<<16) | (buf[4]<<8) | buf[5]; +1.177 (drh 21-May-05): x = (x<<32) | y; +1.276 (drh 30-Mar-07): pMem->u.i = *(i64*)&x; +1.83 (drh 30-May-04): pMem->flags = MEM_Int; +1.177 (drh 21-May-05): return 6; +1.177 (drh 21-May-05): } +1.186 (drh 18-Aug-05): case 6: /* 8-byte signed integer */ +1.177 (drh 21-May-05): case 7: { /* IEEE floating point */ +1.193 (drh 05-Sep-05): u64 x; +1.193 (drh 05-Sep-05): u32 y; +1.232 (drh 23-Jan-06): #if !defined(NDEBUG) && !defined(SQLITE_OMIT_FLOATING_POINT) +1.189 (drh 28-Aug-05): /* Verify that integers and floating point values use the same +1.284 (drh 04-May-07): ** byte order. Or, that if SQLITE_MIXED_ENDIAN_64BIT_FLOAT is +1.284 (drh 04-May-07): ** defined that 64-bit floating point values really are mixed +1.284 (drh 04-May-07): ** endian. 
+1.190 (drh 28-Aug-05): */ +1.189 (drh 28-Aug-05): static const u64 t1 = ((u64)0x3ff00000)<<32; +1.272 (drh 26-Mar-07): static const double r1 = 1.0; +1.318 (drh 30-Aug-07): u64 t2 = t1; +1.318 (drh 30-Aug-07): swapMixedEndianFloat(t2); +1.318 (drh 30-Aug-07): assert( sizeof(r1)==sizeof(t2) && memcmp(&r1, &t2, sizeof(r1))==0 ); +1.189 (drh 28-Aug-05): #endif +1.190 (drh 28-Aug-05): +1.193 (drh 05-Sep-05): x = (buf[0]<<24) | (buf[1]<<16) | (buf[2]<<8) | buf[3]; +1.193 (drh 05-Sep-05): y = (buf[4]<<24) | (buf[5]<<16) | (buf[6]<<8) | buf[7]; +1.177 (drh 21-May-05): x = (x<<32) | y; +1.177 (drh 21-May-05): if( serial_type==6 ){ +1.276 (drh 30-Mar-07): pMem->u.i = *(i64*)&x; +1.177 (drh 21-May-05): pMem->flags = MEM_Int; +1.177 (drh 21-May-05): }else{ +1.272 (drh 26-Mar-07): assert( sizeof(x)==8 && sizeof(pMem->r)==8 ); +1.318 (drh 30-Aug-07): swapMixedEndianFloat(x); +1.272 (drh 26-Mar-07): memcpy(&pMem->r, &x, sizeof(x)); +1.177 (drh 21-May-05): pMem->flags = MEM_Real; +1.83 (drh 30-May-04): } +1.177 (drh 21-May-05): return 8; +1.177 (drh 21-May-05): } +1.215 (drh 29-Dec-05): case 8: /* Integer 0 */ +1.215 (drh 29-Dec-05): case 9: { /* Integer 1 */ +1.276 (drh 30-Mar-07): pMem->u.i = serial_type-8; +1.215 (drh 29-Dec-05): pMem->flags = MEM_Int; +1.215 (drh 29-Dec-05): return 0; +1.215 (drh 29-Dec-05): } +1.177 (drh 21-May-05): default: { +1.177 (drh 21-May-05): int len = (serial_type-12)/2; +1.177 (drh 21-May-05): pMem->z = (char *)buf; +1.177 (drh 21-May-05): pMem->n = len; +1.177 (drh 21-May-05): pMem->xDel = 0; +1.177 (drh 21-May-05): if( serial_type&0x01 ){ +1.177 (drh 21-May-05): pMem->flags = MEM_Str | MEM_Ephem; +1.83 (drh 30-May-04): }else{ +1.177 (drh 21-May-05): pMem->flags = MEM_Blob | MEM_Ephem; +1.83 (drh 30-May-04): } +1.177 (drh 21-May-05): return len; +1.81 (drh 30-May-04): } +1.23 (danielk1 10-May-04): } +1.177 (drh 21-May-05): return 0; +1.22 (danielk1 10-May-04): } +1.22 (danielk1 10-May-04): +1.22 (danielk1 10-May-04): /* +1.223 (drh 12-Jan-06): ** The header of a record consists of a sequence variable-length integers. +1.223 (drh 12-Jan-06): ** These integers are almost always small and are encoded as a single byte. +1.223 (drh 12-Jan-06): ** The following macro takes advantage this fact to provide a fast decode +1.223 (drh 12-Jan-06): ** of the integers in a record header. It is faster for the common case +1.223 (drh 12-Jan-06): ** where the integer is a single byte. It is a little slower when the +1.223 (drh 12-Jan-06): ** integer is two or more bytes. But overall it is faster. +1.223 (drh 12-Jan-06): ** +1.223 (drh 12-Jan-06): ** The following expressions are equivalent: +1.223 (drh 12-Jan-06): ** +1.223 (drh 12-Jan-06): ** x = sqlite3GetVarint32( A, &B ); +1.223 (drh 12-Jan-06): ** +1.223 (drh 12-Jan-06): ** x = GetVarint( A, B ); +1.223 (drh 12-Jan-06): ** +1.223 (drh 12-Jan-06): */ +1.223 (drh 12-Jan-06): #define GetVarint(A,B) ((B = *(A))<=0x7f ? 1 : sqlite3GetVarint32(A, &B)) +1.223 (drh 12-Jan-06): +1.223 (drh 12-Jan-06): /* +1.90 (drh 02-Jun-04): ** This function compares the two table rows or index records specified by +1.38 (danielk1 18-May-04): ** {nKey1, pKey1} and {nKey2, pKey2}, returning a negative, zero +1.38 (danielk1 18-May-04): ** or positive integer if {nKey1, pKey1} is less than, equal to or +1.90 (drh 02-Jun-04): ** greater than {nKey2, pKey2}. Both Key1 and Key2 must be byte strings +1.90 (drh 02-Jun-04): ** composed by the OP_MakeRecord opcode of the VDBE. 
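/* Editor's aside, not part of the patch: the fast path taken by the GetVarint
** macro above.  This sketch assumes SQLite's usual varint layout (big-endian,
** seven bits per byte, high bit as continuation flag), under which any value
** in 0..127 is a single byte with the high bit clear; getVarintFast() is our
** name and stands in for the macro, deferring larger values to the full
** decoder. */
#include <assert.h>

typedef unsigned char u8;
typedef unsigned int u32;

static int getVarintFast(const u8 *a, u32 *pB){
  if( (*pB = a[0])<=0x7f ) return 1;   /* one-byte varint: done in one load */
  return 0;                            /* multi-byte: needs the slow path   */
}

int main(void){
  static const u8 one[] = { 0x2A };        /* 42                          */
  static const u8 two[] = { 0x81, 0x00 };  /* 128, needs two bytes        */
  u32 v;
  assert( getVarintFast(one, &v)==1 && v==42 );
  assert( getVarintFast(two, &v)==0 );
  return 0;
}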
+1.38 (danielk1 18-May-04): */ +1.90 (drh 02-Jun-04): int sqlite3VdbeRecordCompare( +1.38 (danielk1 18-May-04): void *userData, +1.38 (danielk1 18-May-04): int nKey1, const void *pKey1, +1.38 (danielk1 18-May-04): int nKey2, const void *pKey2 +1.38 (danielk1 18-May-04): ){ +1.50 (drh 20-May-04): KeyInfo *pKeyInfo = (KeyInfo*)userData; +1.73 (drh 27-May-04): u32 d1, d2; /* Offset into aKey[] of next data element */ +1.73 (drh 27-May-04): u32 idx1, idx2; /* Offset into aKey[] of next header element */ +1.73 (drh 27-May-04): u32 szHdr1, szHdr2; /* Number of bytes in header */ +1.73 (drh 27-May-04): int i = 0; +1.73 (drh 27-May-04): int nField; +1.73 (drh 27-May-04): int rc = 0; +1.38 (danielk1 18-May-04): const unsigned char *aKey1 = (const unsigned char *)pKey1; +1.38 (danielk1 18-May-04): const unsigned char *aKey2 = (const unsigned char *)pKey2; +1.96 (danielk1 09-Jun-04): +1.96 (danielk1 09-Jun-04): Mem mem1; +1.96 (danielk1 09-Jun-04): Mem mem2; +1.96 (danielk1 09-Jun-04): mem1.enc = pKeyInfo->enc; +1.307 (drh 21-Aug-07): mem1.db = pKeyInfo->db; +1.96 (danielk1 09-Jun-04): mem2.enc = pKeyInfo->enc; +1.307 (drh 21-Aug-07): mem2.db = pKeyInfo->db; +1.73 (drh 27-May-04): +1.223 (drh 12-Jan-06): idx1 = GetVarint(aKey1, szHdr1); +1.73 (drh 27-May-04): d1 = szHdr1; +1.223 (drh 12-Jan-06): idx2 = GetVarint(aKey2, szHdr2); +1.73 (drh 27-May-04): d2 = szHdr2; +1.73 (drh 27-May-04): nField = pKeyInfo->nField; +1.76 (drh 28-May-04): while( idx1=nKey1 && sqlite3VdbeSerialTypeLen(serial_type1)>0 ) break; +1.223 (drh 12-Jan-06): idx2 += GetVarint( aKey2+idx2, serial_type2 ); +1.76 (drh 28-May-04): if( d2>=nKey2 && sqlite3VdbeSerialTypeLen(serial_type2)>0 ) break; +1.39 (danielk1 18-May-04): +1.267 (drh 27-Oct-06): /* Extract the values to be compared. +1.39 (danielk1 18-May-04): */ +1.75 (drh 28-May-04): d1 += sqlite3VdbeSerialGet(&aKey1[d1], serial_type1, &mem1); +1.75 (drh 28-May-04): d2 += sqlite3VdbeSerialGet(&aKey2[d2], serial_type2, &mem2); +1.39 (danielk1 18-May-04): +1.267 (drh 27-Oct-06): /* Do the comparison +1.267 (drh 27-Oct-06): */ +1.76 (drh 28-May-04): rc = sqlite3MemCompare(&mem1, &mem2, iaColl[i] : 0); +1.177 (drh 21-May-05): if( mem1.flags & MEM_Dyn ) sqlite3VdbeMemRelease(&mem1); +1.177 (drh 21-May-05): if( mem2.flags & MEM_Dyn ) sqlite3VdbeMemRelease(&mem2); +1.39 (danielk1 18-May-04): if( rc!=0 ){ +1.73 (drh 27-May-04): break; +1.39 (danielk1 18-May-04): } +1.73 (drh 27-May-04): i++; +1.73 (drh 27-May-04): } +1.73 (drh 27-May-04): +1.73 (drh 27-May-04): /* One of the keys ran out of fields, but all the fields up to that point +1.73 (drh 27-May-04): ** were equal. If the incrKey flag is true, then the second key is +1.73 (drh 27-May-04): ** treated as larger. +1.73 (drh 27-May-04): */ +1.73 (drh 27-May-04): if( rc==0 ){ +1.73 (drh 27-May-04): if( pKeyInfo->incrKey ){ +1.73 (drh 27-May-04): rc = -1; +1.73 (drh 27-May-04): }else if( d1aSortOrder && inField +1.214 (drh 21-Dec-05): && pKeyInfo->aSortOrder[i] ){ +1.73 (drh 27-May-04): rc = -rc; +1.39 (danielk1 18-May-04): } +1.39 (danielk1 18-May-04): +1.73 (drh 27-May-04): return rc; +1.38 (danielk1 18-May-04): } +1.76 (drh 28-May-04): +1.76 (drh 28-May-04): /* +1.90 (drh 02-Jun-04): ** The argument is an index entry composed using the OP_MakeRecord opcode. +1.90 (drh 02-Jun-04): ** The last entry in this record should be an integer (specifically +1.90 (drh 02-Jun-04): ** an integer rowid). This routine returns the number of bytes in +1.90 (drh 02-Jun-04): ** that integer. 
+1.76 (drh 28-May-04): */ +1.237 (drh 24-Feb-06): int sqlite3VdbeIdxRowidLen(const u8 *aKey){ +1.76 (drh 28-May-04): u32 szHdr; /* Size of the header */ +1.76 (drh 28-May-04): u32 typeRowid; /* Serial type of the rowid */ +1.76 (drh 28-May-04): +1.76 (drh 28-May-04): sqlite3GetVarint32(aKey, &szHdr); +1.76 (drh 28-May-04): sqlite3GetVarint32(&aKey[szHdr-1], &typeRowid); +1.76 (drh 28-May-04): return sqlite3VdbeSerialTypeLen(typeRowid); +1.76 (drh 28-May-04): } +1.38 (danielk1 18-May-04): +1.30 (danielk1 13-May-04): +1.30 (danielk1 13-May-04): /* +1.90 (drh 02-Jun-04): ** pCur points at an index entry created using the OP_MakeRecord opcode. +1.90 (drh 02-Jun-04): ** Read the rowid (the last field in the record) and store it in *rowid. +1.90 (drh 02-Jun-04): ** Return SQLITE_OK if everything works, or an error code otherwise. +1.30 (danielk1 13-May-04): */ +1.307 (drh 21-Aug-07): int sqlite3VdbeIdxRowid(BtCursor *pCur, i64 *rowid){ +1.279 (drh 01-Apr-07): i64 nCellKey = 0; +1.30 (danielk1 13-May-04): int rc; +1.76 (drh 28-May-04): u32 szHdr; /* Size of the header */ +1.76 (drh 28-May-04): u32 typeRowid; /* Serial type of the rowid */ +1.76 (drh 28-May-04): u32 lenRowid; /* Size of the rowid */ +1.76 (drh 28-May-04): Mem m, v; +1.30 (danielk1 13-May-04): +1.76 (drh 28-May-04): sqlite3BtreeKeySize(pCur, &nCellKey); +1.76 (drh 28-May-04): if( nCellKey<=0 ){ +1.200 (drh 17-Sep-05): return SQLITE_CORRUPT_BKPT; +1.76 (drh 28-May-04): } +1.307 (drh 21-Aug-07): rc = sqlite3VdbeMemFromBtree(pCur, 0, nCellKey, 1, &m); +1.76 (drh 28-May-04): if( rc ){ +1.30 (danielk1 13-May-04): return rc; +1.30 (danielk1 13-May-04): } +1.210 (drh 09-Dec-05): sqlite3GetVarint32((u8*)m.z, &szHdr); +1.210 (drh 09-Dec-05): sqlite3GetVarint32((u8*)&m.z[szHdr-1], &typeRowid); +1.76 (drh 28-May-04): lenRowid = sqlite3VdbeSerialTypeLen(typeRowid); +1.210 (drh 09-Dec-05): sqlite3VdbeSerialGet((u8*)&m.z[m.n-lenRowid], typeRowid, &v); +1.276 (drh 30-Mar-07): *rowid = v.u.i; +1.105 (danielk1 12-Jun-04): sqlite3VdbeMemRelease(&m); +1.30 (danielk1 13-May-04): return SQLITE_OK; +1.30 (danielk1 13-May-04): } +1.30 (danielk1 13-May-04): +1.44 (drh 19-May-04): /* +1.50 (drh 20-May-04): ** Compare the key of the index entry that cursor pC is point to against +1.44 (drh 19-May-04): ** the key string in pKey (of length nKey). Write into *pRes a number +1.44 (drh 19-May-04): ** that is negative, zero, or positive if pC is less than, equal to, +1.44 (drh 19-May-04): ** or greater than pKey. Return SQLITE_OK on success. +1.50 (drh 20-May-04): ** +1.76 (drh 28-May-04): ** pKey is either created without a rowid or is truncated so that it +1.76 (drh 28-May-04): ** omits the rowid at the end. The rowid at the end of the index entry +1.76 (drh 28-May-04): ** is ignored as well. 
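/* Editor's aside, not part of the patch: a hand-assembled index record in the
** OP_MakeRecord format discussed above, plus the trailing-rowid length
** computation that sqlite3VdbeIdxRowidLen() performs on it.  typeLen()
** repeats the serial-type table; the record bytes and all names are ours. */
#include <assert.h>

typedef unsigned char u8;
typedef unsigned int u32;

static u32 typeLen(u32 t){
  static const u8 aSize[] = { 0, 1, 2, 3, 4, 6, 8, 8, 0, 0, 0, 0 };
  return t>=12 ? (t-12)/2 : aSize[t];
}

int main(void){
  /* Record for the key ('hi', rowid 300):
  **   header: szHdr=3, serial type 17 (2-byte text), serial type 2 (2-byte int)
  **   data:   'h' 'i' 0x01 0x2C                                              */
  static const u8 aKey[] = { 0x03, 0x11, 0x02, 'h', 'i', 0x01, 0x2C };
  u32 szHdr     = aKey[0];           /* both varints fit in a single byte   */
  u32 typeRowid = aKey[szHdr-1];     /* last header entry describes the rowid */
  assert( typeLen(typeRowid)==2 );            /* rowid occupies the final 2 bytes */
  assert( ((aKey[5]<<8) | aKey[6])==300 );    /* 0x012C == 300                    */
  return 0;
}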
+1.44 (drh 19-May-04): */ +1.30 (danielk1 13-May-04): int sqlite3VdbeIdxKeyCompare( +1.44 (drh 19-May-04): Cursor *pC, /* The cursor to compare against */ +1.44 (drh 19-May-04): int nKey, const u8 *pKey, /* The key to compare */ +1.44 (drh 19-May-04): int *res /* Write the comparison result here */ +1.30 (danielk1 13-May-04): ){ +1.279 (drh 01-Apr-07): i64 nCellKey = 0; +1.30 (danielk1 13-May-04): int rc; +1.31 (danielk1 14-May-04): BtCursor *pCur = pC->pCursor; +1.76 (drh 28-May-04): int lenRowid; +1.76 (drh 28-May-04): Mem m; +1.30 (danielk1 13-May-04): +1.30 (danielk1 13-May-04): sqlite3BtreeKeySize(pCur, &nCellKey); +1.30 (danielk1 13-May-04): if( nCellKey<=0 ){ +1.30 (danielk1 13-May-04): *res = 0; +1.30 (danielk1 13-May-04): return SQLITE_OK; +1.30 (danielk1 13-May-04): } +1.307 (drh 21-Aug-07): rc = sqlite3VdbeMemFromBtree(pC->pCursor, 0, nCellKey, 1, &m); +1.76 (drh 28-May-04): if( rc ){ +1.76 (drh 28-May-04): return rc; +1.30 (danielk1 13-May-04): } +1.237 (drh 24-Feb-06): lenRowid = sqlite3VdbeIdxRowidLen((u8*)m.z); +1.90 (drh 02-Jun-04): *res = sqlite3VdbeRecordCompare(pC->pKeyInfo, m.n-lenRowid, m.z, nKey, pKey); +1.105 (danielk1 12-Jun-04): sqlite3VdbeMemRelease(&m); +1.30 (danielk1 13-May-04): return SQLITE_OK; +1.62 (danielk1 25-May-04): } +1.121 (danielk1 21-Jun-04): +1.121 (danielk1 21-Jun-04): /* +1.121 (danielk1 21-Jun-04): ** This routine sets the value to be returned by subsequent calls to +1.121 (danielk1 21-Jun-04): ** sqlite3_changes() on the database handle 'db'. +1.121 (danielk1 21-Jun-04): */ +1.121 (danielk1 21-Jun-04): void sqlite3VdbeSetChanges(sqlite3 *db, int nChange){ +1.307 (drh 21-Aug-07): assert( sqlite3_mutex_held(db->mutex) ); +1.121 (danielk1 21-Jun-04): db->nChange = nChange; +1.121 (danielk1 21-Jun-04): db->nTotalChange += nChange; +1.121 (danielk1 21-Jun-04): } +1.121 (danielk1 21-Jun-04): +1.121 (danielk1 21-Jun-04): /* +1.121 (danielk1 21-Jun-04): ** Set a flag in the vdbe to update the change counter when it is finalised +1.121 (danielk1 21-Jun-04): ** or reset. +1.121 (danielk1 21-Jun-04): */ +1.152 (drh 05-Nov-04): void sqlite3VdbeCountChanges(Vdbe *v){ +1.152 (drh 05-Nov-04): v->changeCntOn = 1; +1.121 (danielk1 21-Jun-04): } +1.159 (drh 22-Jan-05): +1.159 (drh 22-Jan-05): /* +1.159 (drh 22-Jan-05): ** Mark every prepared statement associated with a database connection +1.159 (drh 22-Jan-05): ** as expired. +1.159 (drh 22-Jan-05): ** +1.159 (drh 22-Jan-05): ** An expired statement means that recompilation of the statement is +1.159 (drh 22-Jan-05): ** recommend. Statements expire when things happen that make their +1.159 (drh 22-Jan-05): ** programs obsolete. Removing user-defined functions or collating +1.159 (drh 22-Jan-05): ** sequences, or changing an authorization function are the types of +1.159 (drh 22-Jan-05): ** things that make prepared statements obsolete. +1.159 (drh 22-Jan-05): */ +1.159 (drh 22-Jan-05): void sqlite3ExpirePreparedStatements(sqlite3 *db){ +1.159 (drh 22-Jan-05): Vdbe *p; +1.159 (drh 22-Jan-05): for(p = db->pVdbe; p; p=p->pNext){ +1.159 (drh 22-Jan-05): p->expired = 1; +1.159 (drh 22-Jan-05): } +1.159 (drh 22-Jan-05): } +1.167 (danielk1 09-Mar-05): +1.167 (danielk1 09-Mar-05): /* +1.167 (danielk1 09-Mar-05): ** Return the database associated with the Vdbe. 
+1.167 (danielk1 09-Mar-05): */ +1.167 (danielk1 09-Mar-05): sqlite3 *sqlite3VdbeDb(Vdbe *v){ +1.167 (danielk1 09-Mar-05): return v->db; +1.167 (danielk1 09-Mar-05): } diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/expr.c b/libraries/sqlite/unix/sqlite-3.5.1/src/expr.c new file mode 100644 index 0000000000..0a7091a09d --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/expr.c @@ -0,0 +1,2617 @@ +/* +** 2001 September 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains routines used for analyzing expressions and +** for generating VDBE code that evaluates expressions in SQLite. +** +** $Id: expr.c,v 1.313 2007/09/18 15:55:07 drh Exp $ +*/ +#include "sqliteInt.h" +#include + +/* +** Return the 'affinity' of the expression pExpr if any. +** +** If pExpr is a column, a reference to a column via an 'AS' alias, +** or a sub-select with a column as the return value, then the +** affinity of that column is returned. Otherwise, 0x00 is returned, +** indicating no affinity for the expression. +** +** i.e. the WHERE clause expresssions in the following statements all +** have an affinity: +** +** CREATE TABLE t1(a); +** SELECT * FROM t1 WHERE a; +** SELECT a AS b FROM t1 WHERE b; +** SELECT * FROM t1 WHERE (select a from t1); +*/ +char sqlite3ExprAffinity(Expr *pExpr){ + int op = pExpr->op; + if( op==TK_SELECT ){ + return sqlite3ExprAffinity(pExpr->pSelect->pEList->a[0].pExpr); + } +#ifndef SQLITE_OMIT_CAST + if( op==TK_CAST ){ + return sqlite3AffinityType(&pExpr->token); + } +#endif + return pExpr->affinity; +} + +/* +** Set the collating sequence for expression pExpr to be the collating +** sequence named by pToken. Return a pointer to the revised expression. +** The collating sequence is marked as "explicit" using the EP_ExpCollate +** flag. An explicit collating sequence will override implicit +** collating sequences. +*/ +Expr *sqlite3ExprSetColl(Parse *pParse, Expr *pExpr, Token *pName){ + CollSeq *pColl; + if( pExpr==0 ) return 0; + pColl = sqlite3LocateCollSeq(pParse, (char*)pName->z, pName->n); + if( pColl ){ + pExpr->pColl = pColl; + pExpr->flags |= EP_ExpCollate; + } + return pExpr; +} + +/* +** Return the default collation sequence for the expression pExpr. If +** there is no default collation type, return 0. +*/ +CollSeq *sqlite3ExprCollSeq(Parse *pParse, Expr *pExpr){ + CollSeq *pColl = 0; + if( pExpr ){ + int op; + pColl = pExpr->pColl; + op = pExpr->op; + if( (op==TK_CAST || op==TK_UPLUS) && !pColl ){ + return sqlite3ExprCollSeq(pParse, pExpr->pLeft); + } + } + if( sqlite3CheckCollSeq(pParse, pColl) ){ + pColl = 0; + } + return pColl; +} + +/* +** pExpr is an operand of a comparison operator. aff2 is the +** type affinity of the other operand. This routine returns the +** type affinity that should be used for the comparison operator. +*/ +char sqlite3CompareAffinity(Expr *pExpr, char aff2){ + char aff1 = sqlite3ExprAffinity(pExpr); + if( aff1 && aff2 ){ + /* Both sides of the comparison are columns. If one has numeric + ** affinity, use that. Otherwise use no affinity. 
+ */ + if( sqlite3IsNumericAffinity(aff1) || sqlite3IsNumericAffinity(aff2) ){ + return SQLITE_AFF_NUMERIC; + }else{ + return SQLITE_AFF_NONE; + } + }else if( !aff1 && !aff2 ){ + /* Neither side of the comparison is a column. Compare the + ** results directly. + */ + return SQLITE_AFF_NONE; + }else{ + /* One side is a column, the other is not. Use the columns affinity. */ + assert( aff1==0 || aff2==0 ); + return (aff1 + aff2); + } +} + +/* +** pExpr is a comparison operator. Return the type affinity that should +** be applied to both operands prior to doing the comparison. +*/ +static char comparisonAffinity(Expr *pExpr){ + char aff; + assert( pExpr->op==TK_EQ || pExpr->op==TK_IN || pExpr->op==TK_LT || + pExpr->op==TK_GT || pExpr->op==TK_GE || pExpr->op==TK_LE || + pExpr->op==TK_NE ); + assert( pExpr->pLeft ); + aff = sqlite3ExprAffinity(pExpr->pLeft); + if( pExpr->pRight ){ + aff = sqlite3CompareAffinity(pExpr->pRight, aff); + } + else if( pExpr->pSelect ){ + aff = sqlite3CompareAffinity(pExpr->pSelect->pEList->a[0].pExpr, aff); + } + else if( !aff ){ + aff = SQLITE_AFF_NONE; + } + return aff; +} + +/* +** pExpr is a comparison expression, eg. '=', '<', IN(...) etc. +** idx_affinity is the affinity of an indexed column. Return true +** if the index with affinity idx_affinity may be used to implement +** the comparison in pExpr. +*/ +int sqlite3IndexAffinityOk(Expr *pExpr, char idx_affinity){ + char aff = comparisonAffinity(pExpr); + switch( aff ){ + case SQLITE_AFF_NONE: + return 1; + case SQLITE_AFF_TEXT: + return idx_affinity==SQLITE_AFF_TEXT; + default: + return sqlite3IsNumericAffinity(idx_affinity); + } +} + +/* +** Return the P1 value that should be used for a binary comparison +** opcode (OP_Eq, OP_Ge etc.) used to compare pExpr1 and pExpr2. +** If jumpIfNull is true, then set the low byte of the returned +** P1 value to tell the opcode to jump if either expression +** evaluates to NULL. +*/ +static int binaryCompareP1(Expr *pExpr1, Expr *pExpr2, int jumpIfNull){ + char aff = sqlite3ExprAffinity(pExpr2); + return ((int)sqlite3CompareAffinity(pExpr1, aff))+(jumpIfNull?0x100:0); +} + +/* +** Return a pointer to the collation sequence that should be used by +** a binary comparison operator comparing pLeft and pRight. +** +** If the left hand expression has a collating sequence type, then it is +** used. Otherwise the collation sequence for the right hand expression +** is used, or the default (BINARY) if neither expression has a collating +** type. +** +** Argument pRight (but not pLeft) may be a null pointer. In this case, +** it is not considered. +*/ +CollSeq *sqlite3BinaryCompareCollSeq( + Parse *pParse, + Expr *pLeft, + Expr *pRight +){ + CollSeq *pColl; + assert( pLeft ); + if( pLeft->flags & EP_ExpCollate ){ + assert( pLeft->pColl ); + pColl = pLeft->pColl; + }else if( pRight && pRight->flags & EP_ExpCollate ){ + assert( pRight->pColl ); + pColl = pRight->pColl; + }else{ + pColl = sqlite3ExprCollSeq(pParse, pLeft); + if( !pColl ){ + pColl = sqlite3ExprCollSeq(pParse, pRight); + } + } + return pColl; +} + +/* +** Generate code for a comparison operator. +*/ +static int codeCompare( + Parse *pParse, /* The parsing (and code generating) context */ + Expr *pLeft, /* The left operand */ + Expr *pRight, /* The right operand */ + int opcode, /* The comparison opcode */ + int dest, /* Jump here if true. 
*/ + int jumpIfNull /* If true, jump if either operand is NULL */ +){ + int p1 = binaryCompareP1(pLeft, pRight, jumpIfNull); + CollSeq *p3 = sqlite3BinaryCompareCollSeq(pParse, pLeft, pRight); + return sqlite3VdbeOp3(pParse->pVdbe, opcode, p1, dest, (void*)p3, P3_COLLSEQ); +} + +/* +** Construct a new expression node and return a pointer to it. Memory +** for this node is obtained from sqlite3_malloc(). The calling function +** is responsible for making sure the node eventually gets freed. +*/ +Expr *sqlite3Expr( + sqlite3 *db, /* Handle for sqlite3DbMallocZero() (may be null) */ + int op, /* Expression opcode */ + Expr *pLeft, /* Left operand */ + Expr *pRight, /* Right operand */ + const Token *pToken /* Argument token */ +){ + Expr *pNew; + pNew = sqlite3DbMallocZero(db, sizeof(Expr)); + if( pNew==0 ){ + /* When malloc fails, delete pLeft and pRight. Expressions passed to + ** this function must always be allocated with sqlite3Expr() for this + ** reason. + */ + sqlite3ExprDelete(pLeft); + sqlite3ExprDelete(pRight); + return 0; + } + pNew->op = op; + pNew->pLeft = pLeft; + pNew->pRight = pRight; + pNew->iAgg = -1; + if( pToken ){ + assert( pToken->dyn==0 ); + pNew->span = pNew->token = *pToken; + }else if( pLeft ){ + if( pRight ){ + sqlite3ExprSpan(pNew, &pLeft->span, &pRight->span); + if( pRight->flags & EP_ExpCollate ){ + pNew->flags |= EP_ExpCollate; + pNew->pColl = pRight->pColl; + } + } + if( pLeft->flags & EP_ExpCollate ){ + pNew->flags |= EP_ExpCollate; + pNew->pColl = pLeft->pColl; + } + } + + sqlite3ExprSetHeight(pNew); + return pNew; +} + +/* +** Works like sqlite3Expr() except that it takes an extra Parse* +** argument and notifies the associated connection object if malloc fails. +*/ +Expr *sqlite3PExpr( + Parse *pParse, /* Parsing context */ + int op, /* Expression opcode */ + Expr *pLeft, /* Left operand */ + Expr *pRight, /* Right operand */ + const Token *pToken /* Argument token */ +){ + return sqlite3Expr(pParse->db, op, pLeft, pRight, pToken); +} + +/* +** When doing a nested parse, you can include terms in an expression +** that look like this: #0 #1 #2 ... These terms refer to elements +** on the stack. "#0" means the top of the stack. +** "#1" means the next down on the stack. And so forth. +** +** This routine is called by the parser to deal with on of those terms. +** It immediately generates code to store the value in a memory location. +** The returns an expression that will code to extract the value from +** that memory location as needed. +*/ +Expr *sqlite3RegisterExpr(Parse *pParse, Token *pToken){ + Vdbe *v = pParse->pVdbe; + Expr *p; + int depth; + if( pParse->nested==0 ){ + sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", pToken); + return sqlite3PExpr(pParse, TK_NULL, 0, 0, 0); + } + if( v==0 ) return 0; + p = sqlite3PExpr(pParse, TK_REGISTER, 0, 0, pToken); + if( p==0 ){ + return 0; /* Malloc failed */ + } + depth = atoi((char*)&pToken->z[1]); + p->iTable = pParse->nMem++; + sqlite3VdbeAddOp(v, OP_Dup, depth, 0); + sqlite3VdbeAddOp(v, OP_MemStore, p->iTable, 1); + return p; +} + +/* +** Join two expressions using an AND operator. If either expression is +** NULL, then just return the other expression. +*/ +Expr *sqlite3ExprAnd(sqlite3 *db, Expr *pLeft, Expr *pRight){ + if( pLeft==0 ){ + return pRight; + }else if( pRight==0 ){ + return pLeft; + }else{ + return sqlite3Expr(db, TK_AND, pLeft, pRight, 0); + } +} + +/* +** Set the Expr.span field of the given expression to span all +** text between the two given tokens. 
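/* Editor's aside, not part of the patch: the operand-affinity rules
** implemented by sqlite3CompareAffinity() above, restated over stand-in
** constants.  AFF_UNKNOWN/AFF_NONE/AFF_TEXT/AFF_NUMERIC and isNumeric() are
** ours and are not the SQLITE_AFF_* values; only the decision logic is
** carried over. */
#include <assert.h>

#define AFF_UNKNOWN 0   /* operand is not a column: no affinity at all */
#define AFF_NONE    1
#define AFF_TEXT    2
#define AFF_NUMERIC 3

static int isNumeric(int aff){ return aff==AFF_NUMERIC; }

static int compareAffinity(int aff1, int aff2){
  if( aff1!=AFF_UNKNOWN && aff2!=AFF_UNKNOWN ){
    /* both sides are columns: numeric wins, otherwise no affinity */
    return (isNumeric(aff1) || isNumeric(aff2)) ? AFF_NUMERIC : AFF_NONE;
  }
  if( aff1==AFF_UNKNOWN && aff2==AFF_UNKNOWN ){
    return AFF_NONE;                 /* neither side is a column */
  }
  return aff1!=AFF_UNKNOWN ? aff1 : aff2;  /* exactly one column: use its affinity */
}

int main(void){
  assert( compareAffinity(AFF_TEXT, AFF_NUMERIC)==AFF_NUMERIC );
  assert( compareAffinity(AFF_TEXT, AFF_TEXT)==AFF_NONE );
  assert( compareAffinity(AFF_TEXT, AFF_UNKNOWN)==AFF_TEXT );
  assert( compareAffinity(AFF_UNKNOWN, AFF_UNKNOWN)==AFF_NONE );
  return 0;
}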
+*/ +void sqlite3ExprSpan(Expr *pExpr, Token *pLeft, Token *pRight){ + assert( pRight!=0 ); + assert( pLeft!=0 ); + if( pExpr && pRight->z && pLeft->z ){ + assert( pLeft->dyn==0 || pLeft->z[pLeft->n]==0 ); + if( pLeft->dyn==0 && pRight->dyn==0 ){ + pExpr->span.z = pLeft->z; + pExpr->span.n = pRight->n + (pRight->z - pLeft->z); + }else{ + pExpr->span.z = 0; + } + } +} + +/* +** Construct a new expression node for a function with multiple +** arguments. +*/ +Expr *sqlite3ExprFunction(Parse *pParse, ExprList *pList, Token *pToken){ + Expr *pNew; + assert( pToken ); + pNew = sqlite3DbMallocZero(pParse->db, sizeof(Expr) ); + if( pNew==0 ){ + sqlite3ExprListDelete(pList); /* Avoid leaking memory when malloc fails */ + return 0; + } + pNew->op = TK_FUNCTION; + pNew->pList = pList; + assert( pToken->dyn==0 ); + pNew->token = *pToken; + pNew->span = pNew->token; + + sqlite3ExprSetHeight(pNew); + return pNew; +} + +/* +** Assign a variable number to an expression that encodes a wildcard +** in the original SQL statement. +** +** Wildcards consisting of a single "?" are assigned the next sequential +** variable number. +** +** Wildcards of the form "?nnn" are assigned the number "nnn". We make +** sure "nnn" is not too be to avoid a denial of service attack when +** the SQL statement comes from an external source. +** +** Wildcards of the form ":aaa" or "$aaa" are assigned the same number +** as the previous instance of the same wildcard. Or if this is the first +** instance of the wildcard, the next sequenial variable number is +** assigned. +*/ +void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr){ + Token *pToken; + sqlite3 *db = pParse->db; + + if( pExpr==0 ) return; + pToken = &pExpr->token; + assert( pToken->n>=1 ); + assert( pToken->z!=0 ); + assert( pToken->z[0]!=0 ); + if( pToken->n==1 ){ + /* Wildcard of the form "?". Assign the next variable number */ + pExpr->iTable = ++pParse->nVar; + }else if( pToken->z[0]=='?' ){ + /* Wildcard of the form "?nnn". Convert "nnn" to an integer and + ** use it as the variable number */ + int i; + pExpr->iTable = i = atoi((char*)&pToken->z[1]); + if( i<1 || i>SQLITE_MAX_VARIABLE_NUMBER ){ + sqlite3ErrorMsg(pParse, "variable number must be between ?1 and ?%d", + SQLITE_MAX_VARIABLE_NUMBER); + } + if( i>pParse->nVar ){ + pParse->nVar = i; + } + }else{ + /* Wildcards of the form ":aaa" or "$aaa". Reuse the same variable + ** number as the prior appearance of the same name, or if the name + ** has never appeared before, reuse the same variable number + */ + int i, n; + n = pToken->n; + for(i=0; inVarExpr; i++){ + Expr *pE; + if( (pE = pParse->apVarExpr[i])!=0 + && pE->token.n==n + && memcmp(pE->token.z, pToken->z, n)==0 ){ + pExpr->iTable = pE->iTable; + break; + } + } + if( i>=pParse->nVarExpr ){ + pExpr->iTable = ++pParse->nVar; + if( pParse->nVarExpr>=pParse->nVarExprAlloc-1 ){ + pParse->nVarExprAlloc += pParse->nVarExprAlloc + 10; + pParse->apVarExpr = + sqlite3DbReallocOrFree( + db, + pParse->apVarExpr, + pParse->nVarExprAlloc*sizeof(pParse->apVarExpr[0]) + ); + } + if( !db->mallocFailed ){ + assert( pParse->apVarExpr!=0 ); + pParse->apVarExpr[pParse->nVarExpr++] = pExpr; + } + } + } + if( !pParse->nErr && pParse->nVar>SQLITE_MAX_VARIABLE_NUMBER ){ + sqlite3ErrorMsg(pParse, "too many SQL variables"); + } +} + +/* +** Recursively delete an expression tree. 
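/* Editor's aside, not part of the patch: the variable-numbering rules spelled
** out above for sqlite3ExprAssignVarNumber(), restated over plain strings.
** The tiny registry and assignVarNumber() are ours; the range check against
** SQLITE_MAX_VARIABLE_NUMBER and the error paths are deliberately omitted. */
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define MAX_NAMED 16
static const char *azName[MAX_NAMED];   /* previously seen ":aaa"/"$aaa" names */
static int aiNum[MAX_NAMED];
static int nNamed = 0;
static int nVar = 0;                    /* highest variable number so far */

static int assignVarNumber(const char *zTok){
  int i;
  if( strcmp(zTok, "?")==0 ){
    return ++nVar;                      /* plain "?": next sequential number */
  }
  if( zTok[0]=='?' ){
    i = atoi(&zTok[1]);                 /* "?nnn": exactly nnn */
    if( i>nVar ) nVar = i;
    return i;
  }
  for(i=0; i<nNamed; i++){              /* ":aaa"/"$aaa": reuse a prior number */
    if( strcmp(azName[i], zTok)==0 ) return aiNum[i];
  }
  assert( nNamed<MAX_NAMED );
  azName[nNamed] = zTok;
  aiNum[nNamed] = ++nVar;
  nNamed++;
  return nVar;
}

int main(void){
  assert( assignVarNumber("?")==1 );
  assert( assignVarNumber("?5")==5 );   /* jumps the counter ahead to 5   */
  assert( assignVarNumber(":a")==6 );
  assert( assignVarNumber("?")==7 );
  assert( assignVarNumber(":a")==6 );   /* same name, same number         */
  return 0;
}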
+*/ +void sqlite3ExprDelete(Expr *p){ + if( p==0 ) return; + if( p->span.dyn ) sqlite3_free((char*)p->span.z); + if( p->token.dyn ) sqlite3_free((char*)p->token.z); + sqlite3ExprDelete(p->pLeft); + sqlite3ExprDelete(p->pRight); + sqlite3ExprListDelete(p->pList); + sqlite3SelectDelete(p->pSelect); + sqlite3_free(p); +} + +/* +** The Expr.token field might be a string literal that is quoted. +** If so, remove the quotation marks. +*/ +void sqlite3DequoteExpr(sqlite3 *db, Expr *p){ + if( ExprHasAnyProperty(p, EP_Dequoted) ){ + return; + } + ExprSetProperty(p, EP_Dequoted); + if( p->token.dyn==0 ){ + sqlite3TokenCopy(db, &p->token, &p->token); + } + sqlite3Dequote((char*)p->token.z); +} + + +/* +** The following group of routines make deep copies of expressions, +** expression lists, ID lists, and select statements. The copies can +** be deleted (by being passed to their respective ...Delete() routines) +** without effecting the originals. +** +** The expression list, ID, and source lists return by sqlite3ExprListDup(), +** sqlite3IdListDup(), and sqlite3SrcListDup() can not be further expanded +** by subsequent calls to sqlite*ListAppend() routines. +** +** Any tables that the SrcList might point to are not duplicated. +*/ +Expr *sqlite3ExprDup(sqlite3 *db, Expr *p){ + Expr *pNew; + if( p==0 ) return 0; + pNew = sqlite3DbMallocRaw(db, sizeof(*p) ); + if( pNew==0 ) return 0; + memcpy(pNew, p, sizeof(*pNew)); + if( p->token.z!=0 ){ + pNew->token.z = (u8*)sqlite3DbStrNDup(db, (char*)p->token.z, p->token.n); + pNew->token.dyn = 1; + }else{ + assert( pNew->token.z==0 ); + } + pNew->span.z = 0; + pNew->pLeft = sqlite3ExprDup(db, p->pLeft); + pNew->pRight = sqlite3ExprDup(db, p->pRight); + pNew->pList = sqlite3ExprListDup(db, p->pList); + pNew->pSelect = sqlite3SelectDup(db, p->pSelect); + return pNew; +} +void sqlite3TokenCopy(sqlite3 *db, Token *pTo, Token *pFrom){ + if( pTo->dyn ) sqlite3_free((char*)pTo->z); + if( pFrom->z ){ + pTo->n = pFrom->n; + pTo->z = (u8*)sqlite3DbStrNDup(db, (char*)pFrom->z, pFrom->n); + pTo->dyn = 1; + }else{ + pTo->z = 0; + } +} +ExprList *sqlite3ExprListDup(sqlite3 *db, ExprList *p){ + ExprList *pNew; + struct ExprList_item *pItem, *pOldItem; + int i; + if( p==0 ) return 0; + pNew = sqlite3DbMallocRaw(db, sizeof(*pNew) ); + if( pNew==0 ) return 0; + pNew->iECursor = 0; + pNew->nExpr = pNew->nAlloc = p->nExpr; + pNew->a = pItem = sqlite3DbMallocRaw(db, p->nExpr*sizeof(p->a[0]) ); + if( pItem==0 ){ + sqlite3_free(pNew); + return 0; + } + pOldItem = p->a; + for(i=0; inExpr; i++, pItem++, pOldItem++){ + Expr *pNewExpr, *pOldExpr; + pItem->pExpr = pNewExpr = sqlite3ExprDup(db, pOldExpr = pOldItem->pExpr); + if( pOldExpr->span.z!=0 && pNewExpr ){ + /* Always make a copy of the span for top-level expressions in the + ** expression list. The logic in SELECT processing that determines + ** the names of columns in the result set needs this information */ + sqlite3TokenCopy(db, &pNewExpr->span, &pOldExpr->span); + } + assert( pNewExpr==0 || pNewExpr->span.z!=0 + || pOldExpr->span.z==0 + || db->mallocFailed ); + pItem->zName = sqlite3DbStrDup(db, pOldItem->zName); + pItem->sortOrder = pOldItem->sortOrder; + pItem->isAgg = pOldItem->isAgg; + pItem->done = 0; + } + return pNew; +} + +/* +** If cursors, triggers, views and subqueries are all omitted from +** the build, then none of the following routines, except for +** sqlite3SelectDup(), can be called. sqlite3SelectDup() is sometimes +** called with a NULL argument. 
+*/ +#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER) \ + || !defined(SQLITE_OMIT_SUBQUERY) +SrcList *sqlite3SrcListDup(sqlite3 *db, SrcList *p){ + SrcList *pNew; + int i; + int nByte; + if( p==0 ) return 0; + nByte = sizeof(*p) + (p->nSrc>0 ? sizeof(p->a[0]) * (p->nSrc-1) : 0); + pNew = sqlite3DbMallocRaw(db, nByte ); + if( pNew==0 ) return 0; + pNew->nSrc = pNew->nAlloc = p->nSrc; + for(i=0; inSrc; i++){ + struct SrcList_item *pNewItem = &pNew->a[i]; + struct SrcList_item *pOldItem = &p->a[i]; + Table *pTab; + pNewItem->zDatabase = sqlite3DbStrDup(db, pOldItem->zDatabase); + pNewItem->zName = sqlite3DbStrDup(db, pOldItem->zName); + pNewItem->zAlias = sqlite3DbStrDup(db, pOldItem->zAlias); + pNewItem->jointype = pOldItem->jointype; + pNewItem->iCursor = pOldItem->iCursor; + pNewItem->isPopulated = pOldItem->isPopulated; + pTab = pNewItem->pTab = pOldItem->pTab; + if( pTab ){ + pTab->nRef++; + } + pNewItem->pSelect = sqlite3SelectDup(db, pOldItem->pSelect); + pNewItem->pOn = sqlite3ExprDup(db, pOldItem->pOn); + pNewItem->pUsing = sqlite3IdListDup(db, pOldItem->pUsing); + pNewItem->colUsed = pOldItem->colUsed; + } + return pNew; +} +IdList *sqlite3IdListDup(sqlite3 *db, IdList *p){ + IdList *pNew; + int i; + if( p==0 ) return 0; + pNew = sqlite3DbMallocRaw(db, sizeof(*pNew) ); + if( pNew==0 ) return 0; + pNew->nId = pNew->nAlloc = p->nId; + pNew->a = sqlite3DbMallocRaw(db, p->nId*sizeof(p->a[0]) ); + if( pNew->a==0 ){ + sqlite3_free(pNew); + return 0; + } + for(i=0; inId; i++){ + struct IdList_item *pNewItem = &pNew->a[i]; + struct IdList_item *pOldItem = &p->a[i]; + pNewItem->zName = sqlite3DbStrDup(db, pOldItem->zName); + pNewItem->idx = pOldItem->idx; + } + return pNew; +} +Select *sqlite3SelectDup(sqlite3 *db, Select *p){ + Select *pNew; + if( p==0 ) return 0; + pNew = sqlite3DbMallocRaw(db, sizeof(*p) ); + if( pNew==0 ) return 0; + pNew->isDistinct = p->isDistinct; + pNew->pEList = sqlite3ExprListDup(db, p->pEList); + pNew->pSrc = sqlite3SrcListDup(db, p->pSrc); + pNew->pWhere = sqlite3ExprDup(db, p->pWhere); + pNew->pGroupBy = sqlite3ExprListDup(db, p->pGroupBy); + pNew->pHaving = sqlite3ExprDup(db, p->pHaving); + pNew->pOrderBy = sqlite3ExprListDup(db, p->pOrderBy); + pNew->op = p->op; + pNew->pPrior = sqlite3SelectDup(db, p->pPrior); + pNew->pLimit = sqlite3ExprDup(db, p->pLimit); + pNew->pOffset = sqlite3ExprDup(db, p->pOffset); + pNew->iLimit = -1; + pNew->iOffset = -1; + pNew->isResolved = p->isResolved; + pNew->isAgg = p->isAgg; + pNew->usesEphm = 0; + pNew->disallowOrderBy = 0; + pNew->pRightmost = 0; + pNew->addrOpenEphm[0] = -1; + pNew->addrOpenEphm[1] = -1; + pNew->addrOpenEphm[2] = -1; + return pNew; +} +#else +Select *sqlite3SelectDup(sqlite3 *db, Select *p){ + assert( p==0 ); + return 0; +} +#endif + + +/* +** Add a new element to the end of an expression list. If pList is +** initially NULL, then create a new expression list. +*/ +ExprList *sqlite3ExprListAppend( + Parse *pParse, /* Parsing context */ + ExprList *pList, /* List to which to append. 
Might be NULL */ + Expr *pExpr, /* Expression to be appended */ + Token *pName /* AS keyword for the expression */ +){ + sqlite3 *db = pParse->db; + if( pList==0 ){ + pList = sqlite3DbMallocZero(db, sizeof(ExprList) ); + if( pList==0 ){ + goto no_mem; + } + assert( pList->nAlloc==0 ); + } + if( pList->nAlloc<=pList->nExpr ){ + struct ExprList_item *a; + int n = pList->nAlloc*2 + 4; + a = sqlite3DbRealloc(db, pList->a, n*sizeof(pList->a[0])); + if( a==0 ){ + goto no_mem; + } + pList->a = a; + pList->nAlloc = n; + } + assert( pList->a!=0 ); + if( pExpr || pName ){ + struct ExprList_item *pItem = &pList->a[pList->nExpr++]; + memset(pItem, 0, sizeof(*pItem)); + pItem->zName = sqlite3NameFromToken(db, pName); + pItem->pExpr = pExpr; + } + return pList; + +no_mem: + /* Avoid leaking memory if malloc has failed. */ + sqlite3ExprDelete(pExpr); + sqlite3ExprListDelete(pList); + return 0; +} + +/* +** If the expression list pEList contains more than iLimit elements, +** leave an error message in pParse. +*/ +void sqlite3ExprListCheckLength( + Parse *pParse, + ExprList *pEList, + int iLimit, + const char *zObject +){ + if( pEList && pEList->nExpr>iLimit ){ + sqlite3ErrorMsg(pParse, "too many columns in %s", zObject); + } +} + + +#if defined(SQLITE_TEST) || SQLITE_MAX_EXPR_DEPTH>0 +/* The following three functions, heightOfExpr(), heightOfExprList() +** and heightOfSelect(), are used to determine the maximum height +** of any expression tree referenced by the structure passed as the +** first argument. +** +** If this maximum height is greater than the current value pointed +** to by pnHeight, the second parameter, then set *pnHeight to that +** value. +*/ +static void heightOfExpr(Expr *p, int *pnHeight){ + if( p ){ + if( p->nHeight>*pnHeight ){ + *pnHeight = p->nHeight; + } + } +} +static void heightOfExprList(ExprList *p, int *pnHeight){ + if( p ){ + int i; + for(i=0; inExpr; i++){ + heightOfExpr(p->a[i].pExpr, pnHeight); + } + } +} +static void heightOfSelect(Select *p, int *pnHeight){ + if( p ){ + heightOfExpr(p->pWhere, pnHeight); + heightOfExpr(p->pHaving, pnHeight); + heightOfExpr(p->pLimit, pnHeight); + heightOfExpr(p->pOffset, pnHeight); + heightOfExprList(p->pEList, pnHeight); + heightOfExprList(p->pGroupBy, pnHeight); + heightOfExprList(p->pOrderBy, pnHeight); + heightOfSelect(p->pPrior, pnHeight); + } +} + +/* +** Set the Expr.nHeight variable in the structure passed as an +** argument. An expression with no children, Expr.pList or +** Expr.pSelect member has a height of 1. Any other expression +** has a height equal to the maximum height of any other +** referenced Expr plus one. +*/ +void sqlite3ExprSetHeight(Expr *p){ + int nHeight = 0; + heightOfExpr(p->pLeft, &nHeight); + heightOfExpr(p->pRight, &nHeight); + heightOfExprList(p->pList, &nHeight); + heightOfSelect(p->pSelect, &nHeight); + p->nHeight = nHeight + 1; +} + +/* +** Return the maximum height of any expression tree referenced +** by the select statement passed as an argument. +*/ +int sqlite3SelectExprHeight(Select *p){ + int nHeight = 0; + heightOfSelect(p, &nHeight); + return nHeight; +} +#endif + +/* +** Delete an entire expression list. 
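/* Editor's aside, not part of the patch: the Expr.nHeight rule implemented by
** heightOfExpr()/sqlite3ExprSetHeight() above -- a node's height is one more
** than the largest height among what it references.  Node is a stand-in
** struct, not SQLite's Expr, and the pList/pSelect contributions are left
** out of this sketch. */
#include <assert.h>
#include <stddef.h>

typedef struct Node Node;
struct Node { Node *pLeft, *pRight; int nHeight; };

static void setHeight(Node *p){
  int n = 0;
  if( p->pLeft  && p->pLeft->nHeight>n )  n = p->pLeft->nHeight;
  if( p->pRight && p->pRight->nHeight>n ) n = p->pRight->nHeight;
  p->nHeight = n + 1;
}

int main(void){
  Node a = {0,0,0}, b = {0,0,0}, c = {&a,&b,0}, d = {&c,0,0};
  setHeight(&a); setHeight(&b);         /* leaves: height 1 */
  setHeight(&c);                        /* height 2         */
  setHeight(&d);                        /* height 3         */
  assert( a.nHeight==1 && c.nHeight==2 && d.nHeight==3 );
  return 0;
}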
+*/ +void sqlite3ExprListDelete(ExprList *pList){ + int i; + struct ExprList_item *pItem; + if( pList==0 ) return; + assert( pList->a!=0 || (pList->nExpr==0 && pList->nAlloc==0) ); + assert( pList->nExpr<=pList->nAlloc ); + for(pItem=pList->a, i=0; inExpr; i++, pItem++){ + sqlite3ExprDelete(pItem->pExpr); + sqlite3_free(pItem->zName); + } + sqlite3_free(pList->a); + sqlite3_free(pList); +} + +/* +** Walk an expression tree. Call xFunc for each node visited. +** +** The return value from xFunc determines whether the tree walk continues. +** 0 means continue walking the tree. 1 means do not walk children +** of the current node but continue with siblings. 2 means abandon +** the tree walk completely. +** +** The return value from this routine is 1 to abandon the tree walk +** and 0 to continue. +** +** NOTICE: This routine does *not* descend into subqueries. +*/ +static int walkExprList(ExprList *, int (*)(void *, Expr*), void *); +static int walkExprTree(Expr *pExpr, int (*xFunc)(void*,Expr*), void *pArg){ + int rc; + if( pExpr==0 ) return 0; + rc = (*xFunc)(pArg, pExpr); + if( rc==0 ){ + if( walkExprTree(pExpr->pLeft, xFunc, pArg) ) return 1; + if( walkExprTree(pExpr->pRight, xFunc, pArg) ) return 1; + if( walkExprList(pExpr->pList, xFunc, pArg) ) return 1; + } + return rc>1; +} + +/* +** Call walkExprTree() for every expression in list p. +*/ +static int walkExprList(ExprList *p, int (*xFunc)(void *, Expr*), void *pArg){ + int i; + struct ExprList_item *pItem; + if( !p ) return 0; + for(i=p->nExpr, pItem=p->a; i>0; i--, pItem++){ + if( walkExprTree(pItem->pExpr, xFunc, pArg) ) return 1; + } + return 0; +} + +/* +** Call walkExprTree() for every expression in Select p, not including +** expressions that are part of sub-selects in any FROM clause or the LIMIT +** or OFFSET expressions.. +*/ +static int walkSelectExpr(Select *p, int (*xFunc)(void *, Expr*), void *pArg){ + walkExprList(p->pEList, xFunc, pArg); + walkExprTree(p->pWhere, xFunc, pArg); + walkExprList(p->pGroupBy, xFunc, pArg); + walkExprTree(p->pHaving, xFunc, pArg); + walkExprList(p->pOrderBy, xFunc, pArg); + if( p->pPrior ){ + walkSelectExpr(p->pPrior, xFunc, pArg); + } + return 0; +} + + +/* +** This routine is designed as an xFunc for walkExprTree(). +** +** pArg is really a pointer to an integer. If we can tell by looking +** at pExpr that the expression that contains pExpr is not a constant +** expression, then set *pArg to 0 and return 2 to abandon the tree walk. +** If pExpr does does not disqualify the expression from being a constant +** then do nothing. +** +** After walking the whole tree, if no nodes are found that disqualify +** the expression as constant, then we assume the whole expression +** is constant. See sqlite3ExprIsConstant() for additional information. +*/ +static int exprNodeIsConstant(void *pArg, Expr *pExpr){ + int *pN = (int*)pArg; + + /* If *pArg is 3 then any term of the expression that comes from + ** the ON or USING clauses of a join disqualifies the expression + ** from being considered constant. 
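+ ** For example, when sqlite3ExprIsConstantNotJoin() starts the walk with
+ ** *pArg set to 3, a literal such as the 5 in "... LEFT JOIN t2 ON t2.x=5"
+ ** carries the EP_FromJoin property and is therefore not treated as a
+ ** constant that can be moved out of the join.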
*/ + if( (*pN)==3 && ExprHasAnyProperty(pExpr, EP_FromJoin) ){ + *pN = 0; + return 2; + } + + switch( pExpr->op ){ + /* Consider functions to be constant if all their arguments are constant + ** and *pArg==2 */ + case TK_FUNCTION: + if( (*pN)==2 ) return 0; + /* Fall through */ + case TK_ID: + case TK_COLUMN: + case TK_DOT: + case TK_AGG_FUNCTION: + case TK_AGG_COLUMN: +#ifndef SQLITE_OMIT_SUBQUERY + case TK_SELECT: + case TK_EXISTS: +#endif + *pN = 0; + return 2; + case TK_IN: + if( pExpr->pSelect ){ + *pN = 0; + return 2; + } + default: + return 0; + } +} + +/* +** Walk an expression tree. Return 1 if the expression is constant +** and 0 if it involves variables or function calls. +** +** For the purposes of this function, a double-quoted string (ex: "abc") +** is considered a variable but a single-quoted string (ex: 'abc') is +** a constant. +*/ +int sqlite3ExprIsConstant(Expr *p){ + int isConst = 1; + walkExprTree(p, exprNodeIsConstant, &isConst); + return isConst; +} + +/* +** Walk an expression tree. Return 1 if the expression is constant +** that does no originate from the ON or USING clauses of a join. +** Return 0 if it involves variables or function calls or terms from +** an ON or USING clause. +*/ +int sqlite3ExprIsConstantNotJoin(Expr *p){ + int isConst = 3; + walkExprTree(p, exprNodeIsConstant, &isConst); + return isConst!=0; +} + +/* +** Walk an expression tree. Return 1 if the expression is constant +** or a function call with constant arguments. Return and 0 if there +** are any variables. +** +** For the purposes of this function, a double-quoted string (ex: "abc") +** is considered a variable but a single-quoted string (ex: 'abc') is +** a constant. +*/ +int sqlite3ExprIsConstantOrFunction(Expr *p){ + int isConst = 2; + walkExprTree(p, exprNodeIsConstant, &isConst); + return isConst!=0; +} + +/* +** If the expression p codes a constant integer that is small enough +** to fit in a 32-bit integer, return 1 and put the value of the integer +** in *pValue. If the expression is not an integer or if it is too big +** to fit in a signed 32-bit integer, return 0 and leave *pValue unchanged. +*/ +int sqlite3ExprIsInteger(Expr *p, int *pValue){ + switch( p->op ){ + case TK_INTEGER: { + if( sqlite3GetInt32((char*)p->token.z, pValue) ){ + return 1; + } + break; + } + case TK_UPLUS: { + return sqlite3ExprIsInteger(p->pLeft, pValue); + } + case TK_UMINUS: { + int v; + if( sqlite3ExprIsInteger(p->pLeft, &v) ){ + *pValue = -v; + return 1; + } + break; + } + default: break; + } + return 0; +} + +/* +** Return TRUE if the given string is a row-id column name. +*/ +int sqlite3IsRowid(const char *z){ + if( sqlite3StrICmp(z, "_ROWID_")==0 ) return 1; + if( sqlite3StrICmp(z, "ROWID")==0 ) return 1; + if( sqlite3StrICmp(z, "OID")==0 ) return 1; + return 0; +} + +/* +** Given the name of a column of the form X.Y.Z or Y.Z or just Z, look up +** that name in the set of source tables in pSrcList and make the pExpr +** expression node refer back to that source column. The following changes +** are made to pExpr: +** +** pExpr->iDb Set the index in db->aDb[] of the database holding +** the table. +** pExpr->iTable Set to the cursor number for the table obtained +** from pSrcList. +** pExpr->iColumn Set to the column number within the table. +** pExpr->op Set to TK_COLUMN. +** pExpr->pLeft Any expression this points to is deleted +** pExpr->pRight Any expression this points to is deleted. +** +** The pDbToken is the name of the database (the "X"). 
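+** (In a fully-qualified reference such as "main.t1.c1", pDbToken is "main",
+** pTableToken is "t1" and pColumnToken is "c1".)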
This value may be +** NULL meaning that name is of the form Y.Z or Z. Any available database +** can be used. The pTableToken is the name of the table (the "Y"). This +** value can be NULL if pDbToken is also NULL. If pTableToken is NULL it +** means that the form of the name is Z and that columns from any table +** can be used. +** +** If the name cannot be resolved unambiguously, leave an error message +** in pParse and return non-zero. Return zero on success. +*/ +static int lookupName( + Parse *pParse, /* The parsing context */ + Token *pDbToken, /* Name of the database containing table, or NULL */ + Token *pTableToken, /* Name of table containing column, or NULL */ + Token *pColumnToken, /* Name of the column. */ + NameContext *pNC, /* The name context used to resolve the name */ + Expr *pExpr /* Make this EXPR node point to the selected column */ +){ + char *zDb = 0; /* Name of the database. The "X" in X.Y.Z */ + char *zTab = 0; /* Name of the table. The "Y" in X.Y.Z or Y.Z */ + char *zCol = 0; /* Name of the column. The "Z" */ + int i, j; /* Loop counters */ + int cnt = 0; /* Number of matching column names */ + int cntTab = 0; /* Number of matching table names */ + sqlite3 *db = pParse->db; /* The database */ + struct SrcList_item *pItem; /* Use for looping over pSrcList items */ + struct SrcList_item *pMatch = 0; /* The matching pSrcList item */ + NameContext *pTopNC = pNC; /* First namecontext in the list */ + Schema *pSchema = 0; /* Schema of the expression */ + + assert( pColumnToken && pColumnToken->z ); /* The Z in X.Y.Z cannot be NULL */ + zDb = sqlite3NameFromToken(db, pDbToken); + zTab = sqlite3NameFromToken(db, pTableToken); + zCol = sqlite3NameFromToken(db, pColumnToken); + if( db->mallocFailed ){ + goto lookupname_end; + } + + pExpr->iTable = -1; + while( pNC && cnt==0 ){ + ExprList *pEList; + SrcList *pSrcList = pNC->pSrcList; + + if( pSrcList ){ + for(i=0, pItem=pSrcList->a; inSrc; i++, pItem++){ + Table *pTab; + int iDb; + Column *pCol; + + pTab = pItem->pTab; + assert( pTab!=0 ); + iDb = sqlite3SchemaToIndex(db, pTab->pSchema); + assert( pTab->nCol>0 ); + if( zTab ){ + if( pItem->zAlias ){ + char *zTabName = pItem->zAlias; + if( sqlite3StrICmp(zTabName, zTab)!=0 ) continue; + }else{ + char *zTabName = pTab->zName; + if( zTabName==0 || sqlite3StrICmp(zTabName, zTab)!=0 ) continue; + if( zDb!=0 && sqlite3StrICmp(db->aDb[iDb].zName, zDb)!=0 ){ + continue; + } + } + } + if( 0==(cntTab++) ){ + pExpr->iTable = pItem->iCursor; + pSchema = pTab->pSchema; + pMatch = pItem; + } + for(j=0, pCol=pTab->aCol; jnCol; j++, pCol++){ + if( sqlite3StrICmp(pCol->zName, zCol)==0 ){ + const char *zColl = pTab->aCol[j].zColl; + IdList *pUsing; + cnt++; + pExpr->iTable = pItem->iCursor; + pMatch = pItem; + pSchema = pTab->pSchema; + /* Substitute the rowid (column -1) for the INTEGER PRIMARY KEY */ + pExpr->iColumn = j==pTab->iPKey ? -1 : j; + pExpr->affinity = pTab->aCol[j].affinity; + if( (pExpr->flags & EP_ExpCollate)==0 ){ + pExpr->pColl = sqlite3FindCollSeq(db, ENC(db), zColl,-1, 0); + } + if( inSrc-1 ){ + if( pItem[1].jointype & JT_NATURAL ){ + /* If this match occurred in the left table of a natural join, + ** then skip the right table to avoid a duplicate match */ + pItem++; + i++; + }else if( (pUsing = pItem[1].pUsing)!=0 ){ + /* If this match occurs on a column that is in the USING clause + ** of a join, skip the search of the right table of the join + ** to avoid a duplicate match there. 
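+ ** For example, in "SELECT a FROM t1 JOIN t2 USING(a)" the name a is
+ ** resolved against t1 only; the matching column in t2 is skipped here so
+ ** that the reference is not reported as ambiguous.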
*/ + int k; + for(k=0; knId; k++){ + if( sqlite3StrICmp(pUsing->a[k].zName, zCol)==0 ){ + pItem++; + i++; + break; + } + } + } + } + break; + } + } + } + } + +#ifndef SQLITE_OMIT_TRIGGER + /* If we have not already resolved the name, then maybe + ** it is a new.* or old.* trigger argument reference + */ + if( zDb==0 && zTab!=0 && cnt==0 && pParse->trigStack!=0 ){ + TriggerStack *pTriggerStack = pParse->trigStack; + Table *pTab = 0; + if( pTriggerStack->newIdx != -1 && sqlite3StrICmp("new", zTab) == 0 ){ + pExpr->iTable = pTriggerStack->newIdx; + assert( pTriggerStack->pTab ); + pTab = pTriggerStack->pTab; + }else if( pTriggerStack->oldIdx != -1 && sqlite3StrICmp("old", zTab)==0 ){ + pExpr->iTable = pTriggerStack->oldIdx; + assert( pTriggerStack->pTab ); + pTab = pTriggerStack->pTab; + } + + if( pTab ){ + int iCol; + Column *pCol = pTab->aCol; + + pSchema = pTab->pSchema; + cntTab++; + for(iCol=0; iCol < pTab->nCol; iCol++, pCol++) { + if( sqlite3StrICmp(pCol->zName, zCol)==0 ){ + const char *zColl = pTab->aCol[iCol].zColl; + cnt++; + pExpr->iColumn = iCol==pTab->iPKey ? -1 : iCol; + pExpr->affinity = pTab->aCol[iCol].affinity; + if( (pExpr->flags & EP_ExpCollate)==0 ){ + pExpr->pColl = sqlite3FindCollSeq(db, ENC(db), zColl,-1, 0); + } + pExpr->pTab = pTab; + break; + } + } + } + } +#endif /* !defined(SQLITE_OMIT_TRIGGER) */ + + /* + ** Perhaps the name is a reference to the ROWID + */ + if( cnt==0 && cntTab==1 && sqlite3IsRowid(zCol) ){ + cnt = 1; + pExpr->iColumn = -1; + pExpr->affinity = SQLITE_AFF_INTEGER; + } + + /* + ** If the input is of the form Z (not Y.Z or X.Y.Z) then the name Z + ** might refer to an result-set alias. This happens, for example, when + ** we are resolving names in the WHERE clause of the following command: + ** + ** SELECT a+b AS x FROM table WHERE x<10; + ** + ** In cases like this, replace pExpr with a copy of the expression that + ** forms the result set entry ("a+b" in the example) and return immediately. + ** Note that the expression in the result set should have already been + ** resolved by the time the WHERE clause is resolved. + */ + if( cnt==0 && (pEList = pNC->pEList)!=0 && zTab==0 ){ + for(j=0; jnExpr; j++){ + char *zAs = pEList->a[j].zName; + if( zAs!=0 && sqlite3StrICmp(zAs, zCol)==0 ){ + Expr *pDup, *pOrig; + assert( pExpr->pLeft==0 && pExpr->pRight==0 ); + assert( pExpr->pList==0 ); + assert( pExpr->pSelect==0 ); + pOrig = pEList->a[j].pExpr; + if( !pNC->allowAgg && ExprHasProperty(pOrig, EP_Agg) ){ + sqlite3ErrorMsg(pParse, "misuse of aliased aggregate %s", zAs); + sqlite3_free(zCol); + return 2; + } + pDup = sqlite3ExprDup(db, pOrig); + if( pExpr->flags & EP_ExpCollate ){ + pDup->pColl = pExpr->pColl; + pDup->flags |= EP_ExpCollate; + } + if( pExpr->span.dyn ) sqlite3_free((char*)pExpr->span.z); + if( pExpr->token.dyn ) sqlite3_free((char*)pExpr->token.z); + memcpy(pExpr, pDup, sizeof(*pExpr)); + sqlite3_free(pDup); + cnt = 1; + pMatch = 0; + assert( zTab==0 && zDb==0 ); + goto lookupname_end_2; + } + } + } + + /* Advance to the next name context. The loop will exit when either + ** we have a match (cnt>0) or when we run out of name contexts. + */ + if( cnt==0 ){ + pNC = pNC->pNext; + } + } + + /* + ** If X and Y are NULL (in other words if only the column name Z is + ** supplied) and the value of Z is enclosed in double-quotes, then + ** Z is a string literal if it doesn't match any column names. In that + ** case, we need to return right away and not make any changes to + ** pExpr. 
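+ ** For example, in SELECT "nosuch" FROM t1, if t1 has no column named
+ ** nosuch then the double-quoted token is treated as the string literal
+ ** 'nosuch' instead of raising a "no such column" error.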
+ ** + ** Because no reference was made to outer contexts, the pNC->nRef + ** fields are not changed in any context. + */ + if( cnt==0 && zTab==0 && pColumnToken->z[0]=='"' ){ + sqlite3_free(zCol); + return 0; + } + + /* + ** cnt==0 means there was not match. cnt>1 means there were two or + ** more matches. Either way, we have an error. + */ + if( cnt!=1 ){ + char *z = 0; + char *zErr; + zErr = cnt==0 ? "no such column: %s" : "ambiguous column name: %s"; + if( zDb ){ + sqlite3SetString(&z, zDb, ".", zTab, ".", zCol, (char*)0); + }else if( zTab ){ + sqlite3SetString(&z, zTab, ".", zCol, (char*)0); + }else{ + z = sqlite3StrDup(zCol); + } + if( z ){ + sqlite3ErrorMsg(pParse, zErr, z); + sqlite3_free(z); + pTopNC->nErr++; + }else{ + db->mallocFailed = 1; + } + } + + /* If a column from a table in pSrcList is referenced, then record + ** this fact in the pSrcList.a[].colUsed bitmask. Column 0 causes + ** bit 0 to be set. Column 1 sets bit 1. And so forth. If the + ** column number is greater than the number of bits in the bitmask + ** then set the high-order bit of the bitmask. + */ + if( pExpr->iColumn>=0 && pMatch!=0 ){ + int n = pExpr->iColumn; + if( n>=sizeof(Bitmask)*8 ){ + n = sizeof(Bitmask)*8-1; + } + assert( pMatch->iCursor==pExpr->iTable ); + pMatch->colUsed |= ((Bitmask)1)<pLeft); + pExpr->pLeft = 0; + sqlite3ExprDelete(pExpr->pRight); + pExpr->pRight = 0; + pExpr->op = TK_COLUMN; +lookupname_end_2: + sqlite3_free(zCol); + if( cnt==1 ){ + assert( pNC!=0 ); + sqlite3AuthRead(pParse, pExpr, pSchema, pNC->pSrcList); + if( pMatch && !pMatch->pSelect ){ + pExpr->pTab = pMatch->pTab; + } + /* Increment the nRef value on all name contexts from TopNC up to + ** the point where the name matched. */ + for(;;){ + assert( pTopNC!=0 ); + pTopNC->nRef++; + if( pTopNC==pNC ) break; + pTopNC = pTopNC->pNext; + } + return 0; + } else { + return 1; + } +} + +/* +** This routine is designed as an xFunc for walkExprTree(). +** +** Resolve symbolic names into TK_COLUMN operators for the current +** node in the expression tree. Return 0 to continue the search down +** the tree or 2 to abort the tree walk. +** +** This routine also does error checking and name resolution for +** function names. The operator for aggregate functions is changed +** to TK_AGG_FUNCTION. +*/ +static int nameResolverStep(void *pArg, Expr *pExpr){ + NameContext *pNC = (NameContext*)pArg; + Parse *pParse; + + if( pExpr==0 ) return 1; + assert( pNC!=0 ); + pParse = pNC->pParse; + + if( ExprHasAnyProperty(pExpr, EP_Resolved) ) return 1; + ExprSetProperty(pExpr, EP_Resolved); +#ifndef NDEBUG + if( pNC->pSrcList && pNC->pSrcList->nAlloc>0 ){ + SrcList *pSrcList = pNC->pSrcList; + int i; + for(i=0; ipSrcList->nSrc; i++){ + assert( pSrcList->a[i].iCursor>=0 && pSrcList->a[i].iCursornTab); + } + } +#endif + switch( pExpr->op ){ + /* Double-quoted strings (ex: "abc") are used as identifiers if + ** possible. Otherwise they remain as strings. Single-quoted + ** strings (ex: 'abc') are always string literals. + */ + case TK_STRING: { + if( pExpr->token.z[0]=='\'' ) break; + /* Fall thru into the TK_ID case if this is a double-quoted string */ + } + /* A lone identifier is the name of a column. 
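+ ** For example, the x in "SELECT x FROM t1" arrives here and is resolved
+ ** by lookupName() against the tables of the FROM clause.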
+ */ + case TK_ID: { + lookupName(pParse, 0, 0, &pExpr->token, pNC, pExpr); + return 1; + } + + /* A table name and column name: ID.ID + ** Or a database, table and column: ID.ID.ID + */ + case TK_DOT: { + Token *pColumn; + Token *pTable; + Token *pDb; + Expr *pRight; + + /* if( pSrcList==0 ) break; */ + pRight = pExpr->pRight; + if( pRight->op==TK_ID ){ + pDb = 0; + pTable = &pExpr->pLeft->token; + pColumn = &pRight->token; + }else{ + assert( pRight->op==TK_DOT ); + pDb = &pExpr->pLeft->token; + pTable = &pRight->pLeft->token; + pColumn = &pRight->pRight->token; + } + lookupName(pParse, pDb, pTable, pColumn, pNC, pExpr); + return 1; + } + + /* Resolve function names + */ + case TK_CONST_FUNC: + case TK_FUNCTION: { + ExprList *pList = pExpr->pList; /* The argument list */ + int n = pList ? pList->nExpr : 0; /* Number of arguments */ + int no_such_func = 0; /* True if no such function exists */ + int wrong_num_args = 0; /* True if wrong number of arguments */ + int is_agg = 0; /* True if is an aggregate function */ + int i; + int auth; /* Authorization to use the function */ + int nId; /* Number of characters in function name */ + const char *zId; /* The function name. */ + FuncDef *pDef; /* Information about the function */ + int enc = ENC(pParse->db); /* The database encoding */ + + zId = (char*)pExpr->token.z; + nId = pExpr->token.n; + pDef = sqlite3FindFunction(pParse->db, zId, nId, n, enc, 0); + if( pDef==0 ){ + pDef = sqlite3FindFunction(pParse->db, zId, nId, -1, enc, 0); + if( pDef==0 ){ + no_such_func = 1; + }else{ + wrong_num_args = 1; + } + }else{ + is_agg = pDef->xFunc==0; + } +#ifndef SQLITE_OMIT_AUTHORIZATION + if( pDef ){ + auth = sqlite3AuthCheck(pParse, SQLITE_FUNCTION, 0, pDef->zName, 0); + if( auth!=SQLITE_OK ){ + if( auth==SQLITE_DENY ){ + sqlite3ErrorMsg(pParse, "not authorized to use function: %s", + pDef->zName); + pNC->nErr++; + } + pExpr->op = TK_NULL; + return 1; + } + } +#endif + if( is_agg && !pNC->allowAgg ){ + sqlite3ErrorMsg(pParse, "misuse of aggregate function %.*s()", nId,zId); + pNC->nErr++; + is_agg = 0; + }else if( no_such_func ){ + sqlite3ErrorMsg(pParse, "no such function: %.*s", nId, zId); + pNC->nErr++; + }else if( wrong_num_args ){ + sqlite3ErrorMsg(pParse,"wrong number of arguments to function %.*s()", + nId, zId); + pNC->nErr++; + } + if( is_agg ){ + pExpr->op = TK_AGG_FUNCTION; + pNC->hasAgg = 1; + } + if( is_agg ) pNC->allowAgg = 0; + for(i=0; pNC->nErr==0 && ia[i].pExpr, nameResolverStep, pNC); + } + if( is_agg ) pNC->allowAgg = 1; + /* FIX ME: Compute pExpr->affinity based on the expected return + ** type of the function + */ + return is_agg; + } +#ifndef SQLITE_OMIT_SUBQUERY + case TK_SELECT: + case TK_EXISTS: +#endif + case TK_IN: { + if( pExpr->pSelect ){ + int nRef = pNC->nRef; +#ifndef SQLITE_OMIT_CHECK + if( pNC->isCheck ){ + sqlite3ErrorMsg(pParse,"subqueries prohibited in CHECK constraints"); + } +#endif + sqlite3SelectResolve(pParse, pExpr->pSelect, pNC); + assert( pNC->nRef>=nRef ); + if( nRef!=pNC->nRef ){ + ExprSetProperty(pExpr, EP_VarSelect); + } + } + break; + } +#ifndef SQLITE_OMIT_CHECK + case TK_VARIABLE: { + if( pNC->isCheck ){ + sqlite3ErrorMsg(pParse,"parameters prohibited in CHECK constraints"); + } + break; + } +#endif + } + return 0; +} + +/* +** This routine walks an expression tree and resolves references to +** table columns. Nodes of the form ID.ID or ID resolve into an +** index to the table in the table list and a column offset. The +** Expr.opcode for such nodes is changed to TK_COLUMN. 
The Expr.iTable +** value is changed to the index of the referenced table in pTabList +** plus the "base" value. The base value will ultimately become the +** VDBE cursor number for a cursor that is pointing into the referenced +** table. The Expr.iColumn value is changed to the index of the column +** of the referenced table. The Expr.iColumn value for the special +** ROWID column is -1. Any INTEGER PRIMARY KEY column is tried as an +** alias for ROWID. +** +** Also resolve function names and check the functions for proper +** usage. Make sure all function names are recognized and all functions +** have the correct number of arguments. Leave an error message +** in pParse->zErrMsg if anything is amiss. Return the number of errors. +** +** If the expression contains aggregate functions then set the EP_Agg +** property on the expression. +*/ +int sqlite3ExprResolveNames( + NameContext *pNC, /* Namespace to resolve expressions in. */ + Expr *pExpr /* The expression to be analyzed. */ +){ + int savedHasAgg; + if( pExpr==0 ) return 0; +#if defined(SQLITE_TEST) || SQLITE_MAX_EXPR_DEPTH>0 + if( (pExpr->nHeight+pNC->pParse->nHeight)>SQLITE_MAX_EXPR_DEPTH ){ + sqlite3ErrorMsg(pNC->pParse, + "Expression tree is too large (maximum depth %d)", + SQLITE_MAX_EXPR_DEPTH + ); + return 1; + } + pNC->pParse->nHeight += pExpr->nHeight; +#endif + savedHasAgg = pNC->hasAgg; + pNC->hasAgg = 0; + walkExprTree(pExpr, nameResolverStep, pNC); +#if defined(SQLITE_TEST) || SQLITE_MAX_EXPR_DEPTH>0 + pNC->pParse->nHeight -= pExpr->nHeight; +#endif + if( pNC->nErr>0 ){ + ExprSetProperty(pExpr, EP_Error); + } + if( pNC->hasAgg ){ + ExprSetProperty(pExpr, EP_Agg); + }else if( savedHasAgg ){ + pNC->hasAgg = 1; + } + return ExprHasProperty(pExpr, EP_Error); +} + +/* +** A pointer instance of this structure is used to pass information +** through walkExprTree into codeSubqueryStep(). +*/ +typedef struct QueryCoder QueryCoder; +struct QueryCoder { + Parse *pParse; /* The parsing context */ + NameContext *pNC; /* Namespace of first enclosing query */ +}; + + +/* +** Generate code for scalar subqueries used as an expression +** and IN operators. Examples: +** +** (SELECT a FROM b) -- subquery +** EXISTS (SELECT a FROM b) -- EXISTS subquery +** x IN (4,5,11) -- IN operator with list on right-hand side +** x IN (SELECT a FROM b) -- IN operator with subquery on the right +** +** The pExpr parameter describes the expression that contains the IN +** operator or subquery. +*/ +#ifndef SQLITE_OMIT_SUBQUERY +void sqlite3CodeSubselect(Parse *pParse, Expr *pExpr){ + int testAddr = 0; /* One-time test address */ + Vdbe *v = sqlite3GetVdbe(pParse); + if( v==0 ) return; + + + /* This code must be run in its entirety every time it is encountered + ** if any of the following is true: + ** + ** * The right-hand side is a correlated subquery + ** * The right-hand side is an expression list containing variables + ** * We are inside a trigger + ** + ** If all of the above are false, then we can run this code just once + ** save the results, and reuse the same result on subsequent invocations. 
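+ ** For example, for "SELECT * FROM t1 WHERE x IN (1,2,3)" the ephemeral
+ ** table holding the constant set (1,2,3) is filled only on the first
+ ** evaluation; the OP_If test coded below skips this initialization on
+ ** every later pass over the expression.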
+ */ + if( !ExprHasAnyProperty(pExpr, EP_VarSelect) && !pParse->trigStack ){ + int mem = pParse->nMem++; + sqlite3VdbeAddOp(v, OP_MemLoad, mem, 0); + testAddr = sqlite3VdbeAddOp(v, OP_If, 0, 0); + assert( testAddr>0 || pParse->db->mallocFailed ); + sqlite3VdbeAddOp(v, OP_MemInt, 1, mem); + } + + switch( pExpr->op ){ + case TK_IN: { + char affinity; + KeyInfo keyInfo; + int addr; /* Address of OP_OpenEphemeral instruction */ + + affinity = sqlite3ExprAffinity(pExpr->pLeft); + + /* Whether this is an 'x IN(SELECT...)' or an 'x IN()' + ** expression it is handled the same way. A virtual table is + ** filled with single-field index keys representing the results + ** from the SELECT or the . + ** + ** If the 'x' expression is a column value, or the SELECT... + ** statement returns a column value, then the affinity of that + ** column is used to build the index keys. If both 'x' and the + ** SELECT... statement are columns, then numeric affinity is used + ** if either column has NUMERIC or INTEGER affinity. If neither + ** 'x' nor the SELECT... statement are columns, then numeric affinity + ** is used. + */ + pExpr->iTable = pParse->nTab++; + addr = sqlite3VdbeAddOp(v, OP_OpenEphemeral, pExpr->iTable, 0); + memset(&keyInfo, 0, sizeof(keyInfo)); + keyInfo.nField = 1; + sqlite3VdbeAddOp(v, OP_SetNumColumns, pExpr->iTable, 1); + + if( pExpr->pSelect ){ + /* Case 1: expr IN (SELECT ...) + ** + ** Generate code to write the results of the select into the temporary + ** table allocated and opened above. + */ + int iParm = pExpr->iTable + (((int)affinity)<<16); + ExprList *pEList; + assert( (pExpr->iTable&0x0000FFFF)==pExpr->iTable ); + if( sqlite3Select(pParse, pExpr->pSelect, SRT_Set, iParm, 0, 0, 0, 0) ){ + return; + } + pEList = pExpr->pSelect->pEList; + if( pEList && pEList->nExpr>0 ){ + keyInfo.aColl[0] = sqlite3BinaryCompareCollSeq(pParse, pExpr->pLeft, + pEList->a[0].pExpr); + } + }else if( pExpr->pList ){ + /* Case 2: expr IN (exprlist) + ** + ** For each expression, build an index key from the evaluation and + ** store it in the temporary table. If is a column, then use + ** that columns affinity when building index keys. If is not + ** a column, use numeric affinity. + */ + int i; + ExprList *pList = pExpr->pList; + struct ExprList_item *pItem; + + if( !affinity ){ + affinity = SQLITE_AFF_NONE; + } + keyInfo.aColl[0] = pExpr->pLeft->pColl; + + /* Loop through each expression in . */ + for(i=pList->nExpr, pItem=pList->a; i>0; i--, pItem++){ + Expr *pE2 = pItem->pExpr; + + /* If the expression is not constant then we will need to + ** disable the test that was generated above that makes sure + ** this code only executes once. Because for a non-constant + ** expression we need to rerun this code each time. + */ + if( testAddr>0 && !sqlite3ExprIsConstant(pE2) ){ + sqlite3VdbeChangeToNoop(v, testAddr-1, 3); + testAddr = 0; + } + + /* Evaluate the expression and insert it into the temp table */ + sqlite3ExprCode(pParse, pE2); + sqlite3VdbeOp3(v, OP_MakeRecord, 1, 0, &affinity, 1); + sqlite3VdbeAddOp(v, OP_IdxInsert, pExpr->iTable, 0); + } + } + sqlite3VdbeChangeP3(v, addr, (void *)&keyInfo, P3_KEYINFO); + break; + } + + case TK_EXISTS: + case TK_SELECT: { + /* This has to be a scalar SELECT. Generate code to put the + ** value of this select in a memory cell and record the number + ** of the memory cell in iColumn. 
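+ ** For example, the expression (SELECT max(b) FROM t2) is run here with
+ ** SRT_Mem so that its single value lands in memory cell iMem, and a
+ ** LIMIT 1 is forced onto the subquery via the static Token "one" below;
+ ** the TK_SELECT case of sqlite3ExprCode() later reloads the cell with
+ ** OP_MemLoad.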
+ */ + static const Token one = { (u8*)"1", 0, 1 }; + Select *pSel; + int iMem; + int sop; + + pExpr->iColumn = iMem = pParse->nMem++; + pSel = pExpr->pSelect; + if( pExpr->op==TK_SELECT ){ + sop = SRT_Mem; + sqlite3VdbeAddOp(v, OP_MemNull, iMem, 0); + VdbeComment((v, "# Init subquery result")); + }else{ + sop = SRT_Exists; + sqlite3VdbeAddOp(v, OP_MemInt, 0, iMem); + VdbeComment((v, "# Init EXISTS result")); + } + sqlite3ExprDelete(pSel->pLimit); + pSel->pLimit = sqlite3PExpr(pParse, TK_INTEGER, 0, 0, &one); + if( sqlite3Select(pParse, pSel, sop, iMem, 0, 0, 0, 0) ){ + return; + } + break; + } + } + + if( testAddr ){ + sqlite3VdbeJumpHere(v, testAddr); + } + + return; +} +#endif /* SQLITE_OMIT_SUBQUERY */ + +/* +** Generate an instruction that will put the integer describe by +** text z[0..n-1] on the stack. +*/ +static void codeInteger(Vdbe *v, const char *z, int n){ + assert( z || v==0 || sqlite3VdbeDb(v)->mallocFailed ); + if( z ){ + int i; + if( sqlite3GetInt32(z, &i) ){ + sqlite3VdbeAddOp(v, OP_Integer, i, 0); + }else if( sqlite3FitsIn64Bits(z) ){ + sqlite3VdbeOp3(v, OP_Int64, 0, 0, z, n); + }else{ + sqlite3VdbeOp3(v, OP_Real, 0, 0, z, n); + } + } +} + + +/* +** Generate code that will extract the iColumn-th column from +** table pTab and push that column value on the stack. There +** is an open cursor to pTab in iTable. If iColumn<0 then +** code is generated that extracts the rowid. +*/ +void sqlite3ExprCodeGetColumn(Vdbe *v, Table *pTab, int iColumn, int iTable){ + if( iColumn<0 ){ + int op = (pTab && IsVirtual(pTab)) ? OP_VRowid : OP_Rowid; + sqlite3VdbeAddOp(v, op, iTable, 0); + }else if( pTab==0 ){ + sqlite3VdbeAddOp(v, OP_Column, iTable, iColumn); + }else{ + int op = IsVirtual(pTab) ? OP_VColumn : OP_Column; + sqlite3VdbeAddOp(v, op, iTable, iColumn); + sqlite3ColumnDefault(v, pTab, iColumn); +#ifndef SQLITE_OMIT_FLOATING_POINT + if( pTab->aCol[iColumn].affinity==SQLITE_AFF_REAL ){ + sqlite3VdbeAddOp(v, OP_RealAffinity, 0, 0); + } +#endif + } +} + +/* +** Generate code into the current Vdbe to evaluate the given +** expression and leave the result on the top of stack. +** +** This code depends on the fact that certain token values (ex: TK_EQ) +** are the same as opcode values (ex: OP_Eq) that implement the corresponding +** operation. Special comments in vdbe.c and the mkopcodeh.awk script in +** the make process cause these values to align. Assert()s in the code +** below verify that the numbers are aligned correctly. 
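+** For example, because TK_LT and OP_Lt are arranged to share the same
+** numeric value, the parser token of a comparison operator can be handed
+** directly to codeCompare() as the VDBE opcode, with no translation table.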
+*/ +void sqlite3ExprCode(Parse *pParse, Expr *pExpr){ + Vdbe *v = pParse->pVdbe; + int op; + int stackChng = 1; /* Amount of change to stack depth */ + + if( v==0 ) return; + if( pExpr==0 ){ + sqlite3VdbeAddOp(v, OP_Null, 0, 0); + return; + } + op = pExpr->op; + switch( op ){ + case TK_AGG_COLUMN: { + AggInfo *pAggInfo = pExpr->pAggInfo; + struct AggInfo_col *pCol = &pAggInfo->aCol[pExpr->iAgg]; + if( !pAggInfo->directMode ){ + sqlite3VdbeAddOp(v, OP_MemLoad, pCol->iMem, 0); + break; + }else if( pAggInfo->useSortingIdx ){ + sqlite3VdbeAddOp(v, OP_Column, pAggInfo->sortingIdx, + pCol->iSorterColumn); + break; + } + /* Otherwise, fall thru into the TK_COLUMN case */ + } + case TK_COLUMN: { + if( pExpr->iTable<0 ){ + /* This only happens when coding check constraints */ + assert( pParse->ckOffset>0 ); + sqlite3VdbeAddOp(v, OP_Dup, pParse->ckOffset-pExpr->iColumn-1, 1); + }else{ + sqlite3ExprCodeGetColumn(v, pExpr->pTab, pExpr->iColumn, pExpr->iTable); + } + break; + } + case TK_INTEGER: { + codeInteger(v, (char*)pExpr->token.z, pExpr->token.n); + break; + } + case TK_FLOAT: + case TK_STRING: { + assert( TK_FLOAT==OP_Real ); + assert( TK_STRING==OP_String8 ); + sqlite3DequoteExpr(pParse->db, pExpr); + sqlite3VdbeOp3(v, op, 0, 0, (char*)pExpr->token.z, pExpr->token.n); + break; + } + case TK_NULL: { + sqlite3VdbeAddOp(v, OP_Null, 0, 0); + break; + } +#ifndef SQLITE_OMIT_BLOB_LITERAL + case TK_BLOB: { + int n; + const char *z; + assert( TK_BLOB==OP_HexBlob ); + n = pExpr->token.n - 3; + z = (char*)pExpr->token.z + 2; + assert( n>=0 ); + if( n==0 ){ + z = ""; + } + sqlite3VdbeOp3(v, op, 0, 0, z, n); + break; + } +#endif + case TK_VARIABLE: { + sqlite3VdbeAddOp(v, OP_Variable, pExpr->iTable, 0); + if( pExpr->token.n>1 ){ + sqlite3VdbeChangeP3(v, -1, (char*)pExpr->token.z, pExpr->token.n); + } + break; + } + case TK_REGISTER: { + sqlite3VdbeAddOp(v, OP_MemLoad, pExpr->iTable, 0); + break; + } +#ifndef SQLITE_OMIT_CAST + case TK_CAST: { + /* Expressions of the form: CAST(pLeft AS token) */ + int aff, to_op; + sqlite3ExprCode(pParse, pExpr->pLeft); + aff = sqlite3AffinityType(&pExpr->token); + to_op = aff - SQLITE_AFF_TEXT + OP_ToText; + assert( to_op==OP_ToText || aff!=SQLITE_AFF_TEXT ); + assert( to_op==OP_ToBlob || aff!=SQLITE_AFF_NONE ); + assert( to_op==OP_ToNumeric || aff!=SQLITE_AFF_NUMERIC ); + assert( to_op==OP_ToInt || aff!=SQLITE_AFF_INTEGER ); + assert( to_op==OP_ToReal || aff!=SQLITE_AFF_REAL ); + sqlite3VdbeAddOp(v, to_op, 0, 0); + stackChng = 0; + break; + } +#endif /* SQLITE_OMIT_CAST */ + case TK_LT: + case TK_LE: + case TK_GT: + case TK_GE: + case TK_NE: + case TK_EQ: { + assert( TK_LT==OP_Lt ); + assert( TK_LE==OP_Le ); + assert( TK_GT==OP_Gt ); + assert( TK_GE==OP_Ge ); + assert( TK_EQ==OP_Eq ); + assert( TK_NE==OP_Ne ); + sqlite3ExprCode(pParse, pExpr->pLeft); + sqlite3ExprCode(pParse, pExpr->pRight); + codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op, 0, 0); + stackChng = -1; + break; + } + case TK_AND: + case TK_OR: + case TK_PLUS: + case TK_STAR: + case TK_MINUS: + case TK_REM: + case TK_BITAND: + case TK_BITOR: + case TK_SLASH: + case TK_LSHIFT: + case TK_RSHIFT: + case TK_CONCAT: { + assert( TK_AND==OP_And ); + assert( TK_OR==OP_Or ); + assert( TK_PLUS==OP_Add ); + assert( TK_MINUS==OP_Subtract ); + assert( TK_REM==OP_Remainder ); + assert( TK_BITAND==OP_BitAnd ); + assert( TK_BITOR==OP_BitOr ); + assert( TK_SLASH==OP_Divide ); + assert( TK_LSHIFT==OP_ShiftLeft ); + assert( TK_RSHIFT==OP_ShiftRight ); + assert( TK_CONCAT==OP_Concat ); + sqlite3ExprCode(pParse, 
pExpr->pLeft); + sqlite3ExprCode(pParse, pExpr->pRight); + sqlite3VdbeAddOp(v, op, 0, 0); + stackChng = -1; + break; + } + case TK_UMINUS: { + Expr *pLeft = pExpr->pLeft; + assert( pLeft ); + if( pLeft->op==TK_FLOAT || pLeft->op==TK_INTEGER ){ + Token *p = &pLeft->token; + char *z = sqlite3MPrintf(pParse->db, "-%.*s", p->n, p->z); + if( pLeft->op==TK_FLOAT ){ + sqlite3VdbeOp3(v, OP_Real, 0, 0, z, p->n+1); + }else{ + codeInteger(v, z, p->n+1); + } + sqlite3_free(z); + break; + } + /* Fall through into TK_NOT */ + } + case TK_BITNOT: + case TK_NOT: { + assert( TK_BITNOT==OP_BitNot ); + assert( TK_NOT==OP_Not ); + sqlite3ExprCode(pParse, pExpr->pLeft); + sqlite3VdbeAddOp(v, op, 0, 0); + stackChng = 0; + break; + } + case TK_ISNULL: + case TK_NOTNULL: { + int dest; + assert( TK_ISNULL==OP_IsNull ); + assert( TK_NOTNULL==OP_NotNull ); + sqlite3VdbeAddOp(v, OP_Integer, 1, 0); + sqlite3ExprCode(pParse, pExpr->pLeft); + dest = sqlite3VdbeCurrentAddr(v) + 2; + sqlite3VdbeAddOp(v, op, 1, dest); + sqlite3VdbeAddOp(v, OP_AddImm, -1, 0); + stackChng = 0; + break; + } + case TK_AGG_FUNCTION: { + AggInfo *pInfo = pExpr->pAggInfo; + if( pInfo==0 ){ + sqlite3ErrorMsg(pParse, "misuse of aggregate: %T", + &pExpr->span); + }else{ + sqlite3VdbeAddOp(v, OP_MemLoad, pInfo->aFunc[pExpr->iAgg].iMem, 0); + } + break; + } + case TK_CONST_FUNC: + case TK_FUNCTION: { + ExprList *pList = pExpr->pList; + int nExpr = pList ? pList->nExpr : 0; + FuncDef *pDef; + int nId; + const char *zId; + int constMask = 0; + int i; + sqlite3 *db = pParse->db; + u8 enc = ENC(db); + CollSeq *pColl = 0; + + zId = (char*)pExpr->token.z; + nId = pExpr->token.n; + pDef = sqlite3FindFunction(pParse->db, zId, nId, nExpr, enc, 0); + assert( pDef!=0 ); + nExpr = sqlite3ExprCodeExprList(pParse, pList); +#ifndef SQLITE_OMIT_VIRTUALTABLE + /* Possibly overload the function if the first argument is + ** a virtual table column. + ** + ** For infix functions (LIKE, GLOB, REGEXP, and MATCH) use the + ** second argument, not the first, as the argument to test to + ** see if it is a column in a virtual table. This is done because + ** the left operand of infix functions (the operand we want to + ** control overloading) ends up as the second argument to the + ** function. The expression "A glob B" is equivalent to + ** "glob(B,A). We want to use the A in "A glob B" to test + ** for function overloading. But we use the B term in "glob(B,A)". + */ + if( nExpr>=2 && (pExpr->flags & EP_InfixFunc) ){ + pDef = sqlite3VtabOverloadFunction(db, pDef, nExpr, pList->a[1].pExpr); + }else if( nExpr>0 ){ + pDef = sqlite3VtabOverloadFunction(db, pDef, nExpr, pList->a[0].pExpr); + } +#endif + for(i=0; ia[i].pExpr) ){ + constMask |= (1<needCollSeq && !pColl ){ + pColl = sqlite3ExprCollSeq(pParse, pList->a[i].pExpr); + } + } + if( pDef->needCollSeq ){ + if( !pColl ) pColl = pParse->db->pDfltColl; + sqlite3VdbeOp3(v, OP_CollSeq, 0, 0, (char *)pColl, P3_COLLSEQ); + } + sqlite3VdbeOp3(v, OP_Function, constMask, nExpr, (char*)pDef, P3_FUNCDEF); + stackChng = 1-nExpr; + break; + } +#ifndef SQLITE_OMIT_SUBQUERY + case TK_EXISTS: + case TK_SELECT: { + if( pExpr->iColumn==0 ){ + sqlite3CodeSubselect(pParse, pExpr); + } + sqlite3VdbeAddOp(v, OP_MemLoad, pExpr->iColumn, 0); + VdbeComment((v, "# load subquery result")); + break; + } + case TK_IN: { + int addr; + char affinity; + int ckOffset = pParse->ckOffset; + sqlite3CodeSubselect(pParse, pExpr); + + /* Figure out the affinity to use to create a key from the results + ** of the expression. 
affinityStr stores a static string suitable for + ** P3 of OP_MakeRecord. + */ + affinity = comparisonAffinity(pExpr); + + sqlite3VdbeAddOp(v, OP_Integer, 1, 0); + pParse->ckOffset = (ckOffset ? (ckOffset+1) : 0); + + /* Code the from " IN (...)". The temporary table + ** pExpr->iTable contains the values that make up the (...) set. + */ + sqlite3ExprCode(pParse, pExpr->pLeft); + addr = sqlite3VdbeCurrentAddr(v); + sqlite3VdbeAddOp(v, OP_NotNull, -1, addr+4); /* addr + 0 */ + sqlite3VdbeAddOp(v, OP_Pop, 2, 0); + sqlite3VdbeAddOp(v, OP_Null, 0, 0); + sqlite3VdbeAddOp(v, OP_Goto, 0, addr+7); + sqlite3VdbeOp3(v, OP_MakeRecord, 1, 0, &affinity, 1); /* addr + 4 */ + sqlite3VdbeAddOp(v, OP_Found, pExpr->iTable, addr+7); + sqlite3VdbeAddOp(v, OP_AddImm, -1, 0); /* addr + 6 */ + + break; + } +#endif + case TK_BETWEEN: { + Expr *pLeft = pExpr->pLeft; + struct ExprList_item *pLItem = pExpr->pList->a; + Expr *pRight = pLItem->pExpr; + sqlite3ExprCode(pParse, pLeft); + sqlite3VdbeAddOp(v, OP_Dup, 0, 0); + sqlite3ExprCode(pParse, pRight); + codeCompare(pParse, pLeft, pRight, OP_Ge, 0, 0); + sqlite3VdbeAddOp(v, OP_Pull, 1, 0); + pLItem++; + pRight = pLItem->pExpr; + sqlite3ExprCode(pParse, pRight); + codeCompare(pParse, pLeft, pRight, OP_Le, 0, 0); + sqlite3VdbeAddOp(v, OP_And, 0, 0); + break; + } + case TK_UPLUS: { + sqlite3ExprCode(pParse, pExpr->pLeft); + stackChng = 0; + break; + } + case TK_CASE: { + int expr_end_label; + int jumpInst; + int nExpr; + int i; + ExprList *pEList; + struct ExprList_item *aListelem; + + assert(pExpr->pList); + assert((pExpr->pList->nExpr % 2) == 0); + assert(pExpr->pList->nExpr > 0); + pEList = pExpr->pList; + aListelem = pEList->a; + nExpr = pEList->nExpr; + expr_end_label = sqlite3VdbeMakeLabel(v); + if( pExpr->pLeft ){ + sqlite3ExprCode(pParse, pExpr->pLeft); + } + for(i=0; ipLeft ){ + sqlite3VdbeAddOp(v, OP_Dup, 1, 1); + jumpInst = codeCompare(pParse, pExpr->pLeft, aListelem[i].pExpr, + OP_Ne, 0, 1); + sqlite3VdbeAddOp(v, OP_Pop, 1, 0); + }else{ + jumpInst = sqlite3VdbeAddOp(v, OP_IfNot, 1, 0); + } + sqlite3ExprCode(pParse, aListelem[i+1].pExpr); + sqlite3VdbeAddOp(v, OP_Goto, 0, expr_end_label); + sqlite3VdbeJumpHere(v, jumpInst); + } + if( pExpr->pLeft ){ + sqlite3VdbeAddOp(v, OP_Pop, 1, 0); + } + if( pExpr->pRight ){ + sqlite3ExprCode(pParse, pExpr->pRight); + }else{ + sqlite3VdbeAddOp(v, OP_Null, 0, 0); + } + sqlite3VdbeResolveLabel(v, expr_end_label); + break; + } +#ifndef SQLITE_OMIT_TRIGGER + case TK_RAISE: { + if( !pParse->trigStack ){ + sqlite3ErrorMsg(pParse, + "RAISE() may only be used within a trigger-program"); + return; + } + if( pExpr->iColumn!=OE_Ignore ){ + assert( pExpr->iColumn==OE_Rollback || + pExpr->iColumn == OE_Abort || + pExpr->iColumn == OE_Fail ); + sqlite3DequoteExpr(pParse->db, pExpr); + sqlite3VdbeOp3(v, OP_Halt, SQLITE_CONSTRAINT, pExpr->iColumn, + (char*)pExpr->token.z, pExpr->token.n); + } else { + assert( pExpr->iColumn == OE_Ignore ); + sqlite3VdbeAddOp(v, OP_ContextPop, 0, 0); + sqlite3VdbeAddOp(v, OP_Goto, 0, pParse->trigStack->ignoreJump); + VdbeComment((v, "# raise(IGNORE)")); + } + stackChng = 0; + break; + } +#endif + } + + if( pParse->ckOffset ){ + pParse->ckOffset += stackChng; + assert( pParse->ckOffset ); + } +} + +#ifndef SQLITE_OMIT_TRIGGER +/* +** Generate code that evalutes the given expression and leaves the result +** on the stack. See also sqlite3ExprCode(). 
+** +** This routine might also cache the result and modify the pExpr tree +** so that it will make use of the cached result on subsequent evaluations +** rather than evaluate the whole expression again. Trivial expressions are +** not cached. If the expression is cached, its result is stored in a +** memory location. +*/ +void sqlite3ExprCodeAndCache(Parse *pParse, Expr *pExpr){ + Vdbe *v = pParse->pVdbe; + int iMem; + int addr1, addr2; + if( v==0 ) return; + addr1 = sqlite3VdbeCurrentAddr(v); + sqlite3ExprCode(pParse, pExpr); + addr2 = sqlite3VdbeCurrentAddr(v); + if( addr2>addr1+1 || sqlite3VdbeGetOp(v, addr1)->opcode==OP_Function ){ + iMem = pExpr->iTable = pParse->nMem++; + sqlite3VdbeAddOp(v, OP_MemStore, iMem, 0); + pExpr->op = TK_REGISTER; + } +} +#endif + +/* +** Generate code that pushes the value of every element of the given +** expression list onto the stack. +** +** Return the number of elements pushed onto the stack. +*/ +int sqlite3ExprCodeExprList( + Parse *pParse, /* Parsing context */ + ExprList *pList /* The expression list to be coded */ +){ + struct ExprList_item *pItem; + int i, n; + if( pList==0 ) return 0; + n = pList->nExpr; + for(pItem=pList->a, i=n; i>0; i--, pItem++){ + sqlite3ExprCode(pParse, pItem->pExpr); + } + return n; +} + +/* +** Generate code for a boolean expression such that a jump is made +** to the label "dest" if the expression is true but execution +** continues straight thru if the expression is false. +** +** If the expression evaluates to NULL (neither true nor false), then +** take the jump if the jumpIfNull flag is true. +** +** This code depends on the fact that certain token values (ex: TK_EQ) +** are the same as opcode values (ex: OP_Eq) that implement the corresponding +** operation. Special comments in vdbe.c and the mkopcodeh.awk script in +** the make process cause these values to align. Assert()s in the code +** below verify that the numbers are aligned correctly. +*/ +void sqlite3ExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int jumpIfNull){ + Vdbe *v = pParse->pVdbe; + int op = 0; + int ckOffset = pParse->ckOffset; + if( v==0 || pExpr==0 ) return; + op = pExpr->op; + switch( op ){ + case TK_AND: { + int d2 = sqlite3VdbeMakeLabel(v); + sqlite3ExprIfFalse(pParse, pExpr->pLeft, d2, !jumpIfNull); + sqlite3ExprIfTrue(pParse, pExpr->pRight, dest, jumpIfNull); + sqlite3VdbeResolveLabel(v, d2); + break; + } + case TK_OR: { + sqlite3ExprIfTrue(pParse, pExpr->pLeft, dest, jumpIfNull); + sqlite3ExprIfTrue(pParse, pExpr->pRight, dest, jumpIfNull); + break; + } + case TK_NOT: { + sqlite3ExprIfFalse(pParse, pExpr->pLeft, dest, jumpIfNull); + break; + } + case TK_LT: + case TK_LE: + case TK_GT: + case TK_GE: + case TK_NE: + case TK_EQ: { + assert( TK_LT==OP_Lt ); + assert( TK_LE==OP_Le ); + assert( TK_GT==OP_Gt ); + assert( TK_GE==OP_Ge ); + assert( TK_EQ==OP_Eq ); + assert( TK_NE==OP_Ne ); + sqlite3ExprCode(pParse, pExpr->pLeft); + sqlite3ExprCode(pParse, pExpr->pRight); + codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op, dest, jumpIfNull); + break; + } + case TK_ISNULL: + case TK_NOTNULL: { + assert( TK_ISNULL==OP_IsNull ); + assert( TK_NOTNULL==OP_NotNull ); + sqlite3ExprCode(pParse, pExpr->pLeft); + sqlite3VdbeAddOp(v, op, 1, dest); + break; + } + case TK_BETWEEN: { + /* The expression "x BETWEEN y AND z" is implemented as: + ** + ** 1 IF (x < y) GOTO 3 + ** 2 IF (x <= z) GOTO + ** 3 ... 
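+ ** where the GOTO in step 2 jumps to the caller-supplied label "dest" and
+ ** step 3 is where execution continues when the BETWEEN test is false.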
+ */ + int addr; + Expr *pLeft = pExpr->pLeft; + Expr *pRight = pExpr->pList->a[0].pExpr; + sqlite3ExprCode(pParse, pLeft); + sqlite3VdbeAddOp(v, OP_Dup, 0, 0); + sqlite3ExprCode(pParse, pRight); + addr = codeCompare(pParse, pLeft, pRight, OP_Lt, 0, !jumpIfNull); + + pRight = pExpr->pList->a[1].pExpr; + sqlite3ExprCode(pParse, pRight); + codeCompare(pParse, pLeft, pRight, OP_Le, dest, jumpIfNull); + + sqlite3VdbeAddOp(v, OP_Integer, 0, 0); + sqlite3VdbeJumpHere(v, addr); + sqlite3VdbeAddOp(v, OP_Pop, 1, 0); + break; + } + default: { + sqlite3ExprCode(pParse, pExpr); + sqlite3VdbeAddOp(v, OP_If, jumpIfNull, dest); + break; + } + } + pParse->ckOffset = ckOffset; +} + +/* +** Generate code for a boolean expression such that a jump is made +** to the label "dest" if the expression is false but execution +** continues straight thru if the expression is true. +** +** If the expression evaluates to NULL (neither true nor false) then +** jump if jumpIfNull is true or fall through if jumpIfNull is false. +*/ +void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int jumpIfNull){ + Vdbe *v = pParse->pVdbe; + int op = 0; + int ckOffset = pParse->ckOffset; + if( v==0 || pExpr==0 ) return; + + /* The value of pExpr->op and op are related as follows: + ** + ** pExpr->op op + ** --------- ---------- + ** TK_ISNULL OP_NotNull + ** TK_NOTNULL OP_IsNull + ** TK_NE OP_Eq + ** TK_EQ OP_Ne + ** TK_GT OP_Le + ** TK_LE OP_Gt + ** TK_GE OP_Lt + ** TK_LT OP_Ge + ** + ** For other values of pExpr->op, op is undefined and unused. + ** The value of TK_ and OP_ constants are arranged such that we + ** can compute the mapping above using the following expression. + ** Assert()s verify that the computation is correct. + */ + op = ((pExpr->op+(TK_ISNULL&1))^1)-(TK_ISNULL&1); + + /* Verify correct alignment of TK_ and OP_ constants + */ + assert( pExpr->op!=TK_ISNULL || op==OP_NotNull ); + assert( pExpr->op!=TK_NOTNULL || op==OP_IsNull ); + assert( pExpr->op!=TK_NE || op==OP_Eq ); + assert( pExpr->op!=TK_EQ || op==OP_Ne ); + assert( pExpr->op!=TK_LT || op==OP_Ge ); + assert( pExpr->op!=TK_LE || op==OP_Gt ); + assert( pExpr->op!=TK_GT || op==OP_Le ); + assert( pExpr->op!=TK_GE || op==OP_Lt ); + + switch( pExpr->op ){ + case TK_AND: { + sqlite3ExprIfFalse(pParse, pExpr->pLeft, dest, jumpIfNull); + sqlite3ExprIfFalse(pParse, pExpr->pRight, dest, jumpIfNull); + break; + } + case TK_OR: { + int d2 = sqlite3VdbeMakeLabel(v); + sqlite3ExprIfTrue(pParse, pExpr->pLeft, d2, !jumpIfNull); + sqlite3ExprIfFalse(pParse, pExpr->pRight, dest, jumpIfNull); + sqlite3VdbeResolveLabel(v, d2); + break; + } + case TK_NOT: { + sqlite3ExprIfTrue(pParse, pExpr->pLeft, dest, jumpIfNull); + break; + } + case TK_LT: + case TK_LE: + case TK_GT: + case TK_GE: + case TK_NE: + case TK_EQ: { + sqlite3ExprCode(pParse, pExpr->pLeft); + sqlite3ExprCode(pParse, pExpr->pRight); + codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op, dest, jumpIfNull); + break; + } + case TK_ISNULL: + case TK_NOTNULL: { + sqlite3ExprCode(pParse, pExpr->pLeft); + sqlite3VdbeAddOp(v, op, 1, dest); + break; + } + case TK_BETWEEN: { + /* The expression is "x BETWEEN y AND z". 
It is implemented as: + ** + ** 1 IF (x >= y) GOTO 3 + ** 2 GOTO + ** 3 IF (x > z) GOTO + */ + int addr; + Expr *pLeft = pExpr->pLeft; + Expr *pRight = pExpr->pList->a[0].pExpr; + sqlite3ExprCode(pParse, pLeft); + sqlite3VdbeAddOp(v, OP_Dup, 0, 0); + sqlite3ExprCode(pParse, pRight); + addr = sqlite3VdbeCurrentAddr(v); + codeCompare(pParse, pLeft, pRight, OP_Ge, addr+3, !jumpIfNull); + + sqlite3VdbeAddOp(v, OP_Pop, 1, 0); + sqlite3VdbeAddOp(v, OP_Goto, 0, dest); + pRight = pExpr->pList->a[1].pExpr; + sqlite3ExprCode(pParse, pRight); + codeCompare(pParse, pLeft, pRight, OP_Gt, dest, jumpIfNull); + break; + } + default: { + sqlite3ExprCode(pParse, pExpr); + sqlite3VdbeAddOp(v, OP_IfNot, jumpIfNull, dest); + break; + } + } + pParse->ckOffset = ckOffset; +} + +/* +** Do a deep comparison of two expression trees. Return TRUE (non-zero) +** if they are identical and return FALSE if they differ in any way. +** +** Sometimes this routine will return FALSE even if the two expressions +** really are equivalent. If we cannot prove that the expressions are +** identical, we return FALSE just to be safe. So if this routine +** returns false, then you do not really know for certain if the two +** expressions are the same. But if you get a TRUE return, then you +** can be sure the expressions are the same. In the places where +** this routine is used, it does not hurt to get an extra FALSE - that +** just might result in some slightly slower code. But returning +** an incorrect TRUE could lead to a malfunction. +*/ +int sqlite3ExprCompare(Expr *pA, Expr *pB){ + int i; + if( pA==0||pB==0 ){ + return pB==pA; + } + if( pA->op!=pB->op ) return 0; + if( (pA->flags & EP_Distinct)!=(pB->flags & EP_Distinct) ) return 0; + if( !sqlite3ExprCompare(pA->pLeft, pB->pLeft) ) return 0; + if( !sqlite3ExprCompare(pA->pRight, pB->pRight) ) return 0; + if( pA->pList ){ + if( pB->pList==0 ) return 0; + if( pA->pList->nExpr!=pB->pList->nExpr ) return 0; + for(i=0; ipList->nExpr; i++){ + if( !sqlite3ExprCompare(pA->pList->a[i].pExpr, pB->pList->a[i].pExpr) ){ + return 0; + } + } + }else if( pB->pList ){ + return 0; + } + if( pA->pSelect || pB->pSelect ) return 0; + if( pA->iTable!=pB->iTable || pA->iColumn!=pB->iColumn ) return 0; + if( pA->op!=TK_COLUMN && pA->token.z ){ + if( pB->token.z==0 ) return 0; + if( pB->token.n!=pA->token.n ) return 0; + if( sqlite3StrNICmp((char*)pA->token.z,(char*)pB->token.z,pB->token.n)!=0 ){ + return 0; + } + } + return 1; +} + + +/* +** Add a new element to the pAggInfo->aCol[] array. Return the index of +** the new element. Return a negative number if malloc fails. +*/ +static int addAggInfoColumn(sqlite3 *db, AggInfo *pInfo){ + int i; + pInfo->aCol = sqlite3ArrayAllocate( + db, + pInfo->aCol, + sizeof(pInfo->aCol[0]), + 3, + &pInfo->nColumn, + &pInfo->nColumnAlloc, + &i + ); + return i; +} + +/* +** Add a new element to the pAggInfo->aFunc[] array. Return the index of +** the new element. Return a negative number if malloc fails. +*/ +static int addAggInfoFunc(sqlite3 *db, AggInfo *pInfo){ + int i; + pInfo->aFunc = sqlite3ArrayAllocate( + db, + pInfo->aFunc, + sizeof(pInfo->aFunc[0]), + 3, + &pInfo->nFunc, + &pInfo->nFuncAlloc, + &i + ); + return i; +} + +/* +** This is an xFunc for walkExprTree() used to implement +** sqlite3ExprAnalyzeAggregates(). See sqlite3ExprAnalyzeAggregates +** for additional information. +** +** This routine analyzes the aggregate function at pExpr. 
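+** For example, while analyzing "SELECT a, sum(b) FROM t1 GROUP BY a" the
+** column a receives an entry in pAggInfo->aCol[] and sum(b) an entry in
+** pAggInfo->aFunc[]; both Expr nodes are rewritten as TK_AGG_COLUMN and
+** TK_AGG_FUNCTION so that code generation later reads them from the
+** AggInfo memory cells.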
+*/ +static int analyzeAggregate(void *pArg, Expr *pExpr){ + int i; + NameContext *pNC = (NameContext *)pArg; + Parse *pParse = pNC->pParse; + SrcList *pSrcList = pNC->pSrcList; + AggInfo *pAggInfo = pNC->pAggInfo; + + switch( pExpr->op ){ + case TK_AGG_COLUMN: + case TK_COLUMN: { + /* Check to see if the column is in one of the tables in the FROM + ** clause of the aggregate query */ + if( pSrcList ){ + struct SrcList_item *pItem = pSrcList->a; + for(i=0; inSrc; i++, pItem++){ + struct AggInfo_col *pCol; + if( pExpr->iTable==pItem->iCursor ){ + /* If we reach this point, it means that pExpr refers to a table + ** that is in the FROM clause of the aggregate query. + ** + ** Make an entry for the column in pAggInfo->aCol[] if there + ** is not an entry there already. + */ + int k; + pCol = pAggInfo->aCol; + for(k=0; knColumn; k++, pCol++){ + if( pCol->iTable==pExpr->iTable && + pCol->iColumn==pExpr->iColumn ){ + break; + } + } + if( (k>=pAggInfo->nColumn) + && (k = addAggInfoColumn(pParse->db, pAggInfo))>=0 + ){ + pCol = &pAggInfo->aCol[k]; + pCol->pTab = pExpr->pTab; + pCol->iTable = pExpr->iTable; + pCol->iColumn = pExpr->iColumn; + pCol->iMem = pParse->nMem++; + pCol->iSorterColumn = -1; + pCol->pExpr = pExpr; + if( pAggInfo->pGroupBy ){ + int j, n; + ExprList *pGB = pAggInfo->pGroupBy; + struct ExprList_item *pTerm = pGB->a; + n = pGB->nExpr; + for(j=0; jpExpr; + if( pE->op==TK_COLUMN && pE->iTable==pExpr->iTable && + pE->iColumn==pExpr->iColumn ){ + pCol->iSorterColumn = j; + break; + } + } + } + if( pCol->iSorterColumn<0 ){ + pCol->iSorterColumn = pAggInfo->nSortingColumn++; + } + } + /* There is now an entry for pExpr in pAggInfo->aCol[] (either + ** because it was there before or because we just created it). + ** Convert the pExpr to be a TK_AGG_COLUMN referring to that + ** pAggInfo->aCol[] entry. + */ + pExpr->pAggInfo = pAggInfo; + pExpr->op = TK_AGG_COLUMN; + pExpr->iAgg = k; + break; + } /* endif pExpr->iTable==pItem->iCursor */ + } /* end loop over pSrcList */ + } + return 1; + } + case TK_AGG_FUNCTION: { + /* The pNC->nDepth==0 test causes aggregate functions in subqueries + ** to be ignored */ + if( pNC->nDepth==0 ){ + /* Check to see if pExpr is a duplicate of another aggregate + ** function that is already in the pAggInfo structure + */ + struct AggInfo_func *pItem = pAggInfo->aFunc; + for(i=0; inFunc; i++, pItem++){ + if( sqlite3ExprCompare(pItem->pExpr, pExpr) ){ + break; + } + } + if( i>=pAggInfo->nFunc ){ + /* pExpr is original. Make a new entry in pAggInfo->aFunc[] + */ + u8 enc = ENC(pParse->db); + i = addAggInfoFunc(pParse->db, pAggInfo); + if( i>=0 ){ + pItem = &pAggInfo->aFunc[i]; + pItem->pExpr = pExpr; + pItem->iMem = pParse->nMem++; + pItem->pFunc = sqlite3FindFunction(pParse->db, + (char*)pExpr->token.z, pExpr->token.n, + pExpr->pList ? pExpr->pList->nExpr : 0, enc, 0); + if( pExpr->flags & EP_Distinct ){ + pItem->iDistinct = pParse->nTab++; + }else{ + pItem->iDistinct = -1; + } + } + } + /* Make pExpr point to the appropriate pAggInfo->aFunc[] entry + */ + pExpr->iAgg = i; + pExpr->pAggInfo = pAggInfo; + return 1; + } + } + } + + /* Recursively walk subqueries looking for TK_COLUMN nodes that need + ** to be changed to TK_AGG_COLUMN. But increment nDepth so that + ** TK_AGG_FUNCTION nodes in subqueries will be unchanged. 
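+ ** For example, in "SELECT sum(a), (SELECT max(b) FROM t2) FROM t1" the
+ ** inner max() is left untouched here because pNC->nDepth is non-zero
+ ** while the subquery is walked; only the outer sum() belongs to this
+ ** AggInfo.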
+ */ + if( pExpr->pSelect ){ + pNC->nDepth++; + walkSelectExpr(pExpr->pSelect, analyzeAggregate, pNC); + pNC->nDepth--; + } + return 0; +} + +/* +** Analyze the given expression looking for aggregate functions and +** for variables that need to be added to the pParse->aAgg[] array. +** Make additional entries to the pParse->aAgg[] array as necessary. +** +** This routine should only be called after the expression has been +** analyzed by sqlite3ExprResolveNames(). +** +** If errors are seen, leave an error message in zErrMsg and return +** the number of errors. +*/ +int sqlite3ExprAnalyzeAggregates(NameContext *pNC, Expr *pExpr){ + int nErr = pNC->pParse->nErr; + walkExprTree(pExpr, analyzeAggregate, pNC); + return pNC->pParse->nErr - nErr; +} + +/* +** Call sqlite3ExprAnalyzeAggregates() for every expression in an +** expression list. Return the number of errors. +** +** If an error is found, the analysis is cut short. +*/ +int sqlite3ExprAnalyzeAggList(NameContext *pNC, ExprList *pList){ + struct ExprList_item *pItem; + int i; + int nErr = 0; + if( pList ){ + for(pItem=pList->a, i=0; nErr==0 && inExpr; i++, pItem++){ + nErr += sqlite3ExprAnalyzeAggregates(pNC, pItem->pExpr); + } + } + return nErr; +} diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/func.c b/libraries/sqlite/unix/sqlite-3.5.1/src/func.c new file mode 100644 index 0000000000..1760626ff2 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/func.c @@ -0,0 +1,1509 @@ +/* +** 2002 February 23 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the C functions that implement various SQL +** functions of SQLite. +** +** There is only one exported symbol in this file - the function +** sqliteRegisterBuildinFunctions() found at the bottom of the file. +** All other code has file scope. +** +** $Id: func.c,v 1.174 2007/09/03 11:04:22 danielk1977 Exp $ +*/ +#include "sqliteInt.h" +#include +#include +#include +#include "vdbeInt.h" + + +/* +** Return the collating function associated with a function. +*/ +static CollSeq *sqlite3GetFuncCollSeq(sqlite3_context *context){ + return context->pColl; +} + +/* +** Implementation of the non-aggregate min() and max() functions +*/ +static void minmaxFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + int i; + int mask; /* 0 for min() or 0xffffffff for max() */ + int iBest; + CollSeq *pColl; + + if( argc==0 ) return; + mask = sqlite3_user_data(context)==0 ? 0 : -1; + pColl = sqlite3GetFuncCollSeq(context); + assert( pColl ); + assert( mask==-1 || mask==0 ); + iBest = 0; + if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; + for(i=1; i=0 ){ + iBest = i; + } + } + sqlite3_result_value(context, argv[iBest]); +} + +/* +** Return the type of the argument. 
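+** For example, typeof(3) is 'integer', typeof(3.0) is 'real', typeof('3')
+** is 'text', typeof(x'33') is 'blob' and typeof(NULL) is 'null'.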
+*/ +static void typeofFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + const char *z = 0; + switch( sqlite3_value_type(argv[0]) ){ + case SQLITE_NULL: z = "null"; break; + case SQLITE_INTEGER: z = "integer"; break; + case SQLITE_TEXT: z = "text"; break; + case SQLITE_FLOAT: z = "real"; break; + case SQLITE_BLOB: z = "blob"; break; + } + sqlite3_result_text(context, z, -1, SQLITE_STATIC); +} + + +/* +** Implementation of the length() function +*/ +static void lengthFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + int len; + + assert( argc==1 ); + switch( sqlite3_value_type(argv[0]) ){ + case SQLITE_BLOB: + case SQLITE_INTEGER: + case SQLITE_FLOAT: { + sqlite3_result_int(context, sqlite3_value_bytes(argv[0])); + break; + } + case SQLITE_TEXT: { + const unsigned char *z = sqlite3_value_text(argv[0]); + if( z==0 ) return; + len = 0; + while( *z ){ + len++; + SQLITE_SKIP_UTF8(z); + } + sqlite3_result_int(context, len); + break; + } + default: { + sqlite3_result_null(context); + break; + } + } +} + +/* +** Implementation of the abs() function +*/ +static void absFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ + assert( argc==1 ); + switch( sqlite3_value_type(argv[0]) ){ + case SQLITE_INTEGER: { + i64 iVal = sqlite3_value_int64(argv[0]); + if( iVal<0 ){ + if( (iVal<<1)==0 ){ + sqlite3_result_error(context, "integer overflow", -1); + return; + } + iVal = -iVal; + } + sqlite3_result_int64(context, iVal); + break; + } + case SQLITE_NULL: { + sqlite3_result_null(context); + break; + } + default: { + double rVal = sqlite3_value_double(argv[0]); + if( rVal<0 ) rVal = -rVal; + sqlite3_result_double(context, rVal); + break; + } + } +} + +/* +** Implementation of the substr() function. +** +** substr(x,p1,p2) returns p2 characters of x[] beginning with p1. +** p1 is 1-indexed. So substr(x,1,1) returns the first character +** of x. If x is text, then we actually count UTF-8 characters. +** If x is a blob, then we count bytes. +** +** If p1 is negative, then we begin abs(p1) from the end of x[]. 
+*/ +static void substrFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + const unsigned char *z; + const unsigned char *z2; + int len; + int p0type; + i64 p1, p2; + + assert( argc==3 ); + p0type = sqlite3_value_type(argv[0]); + if( p0type==SQLITE_BLOB ){ + len = sqlite3_value_bytes(argv[0]); + z = sqlite3_value_blob(argv[0]); + if( z==0 ) return; + assert( len==sqlite3_value_bytes(argv[0]) ); + }else{ + z = sqlite3_value_text(argv[0]); + if( z==0 ) return; + len = 0; + for(z2=z; *z2; len++){ + SQLITE_SKIP_UTF8(z2); + } + } + p1 = sqlite3_value_int(argv[1]); + p2 = sqlite3_value_int(argv[2]); + if( p1<0 ){ + p1 += len; + if( p1<0 ){ + p2 += p1; + p1 = 0; + } + }else if( p1>0 ){ + p1--; + } + if( p1+p2>len ){ + p2 = len-p1; + } + if( p0type!=SQLITE_BLOB ){ + while( *z && p1 ){ + SQLITE_SKIP_UTF8(z); + p1--; + } + for(z2=z; *z2 && p2; p2--){ + SQLITE_SKIP_UTF8(z2); + } + sqlite3_result_text(context, (char*)z, z2-z, SQLITE_TRANSIENT); + }else{ + if( p2<0 ) p2 = 0; + sqlite3_result_blob(context, (char*)&z[p1], p2, SQLITE_TRANSIENT); + } +} + +/* +** Implementation of the round() function +*/ +static void roundFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ + int n = 0; + double r; + char zBuf[500]; /* larger than the %f representation of the largest double */ + assert( argc==1 || argc==2 ); + if( argc==2 ){ + if( SQLITE_NULL==sqlite3_value_type(argv[1]) ) return; + n = sqlite3_value_int(argv[1]); + if( n>30 ) n = 30; + if( n<0 ) n = 0; + } + if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; + r = sqlite3_value_double(argv[0]); + sqlite3_snprintf(sizeof(zBuf),zBuf,"%.*f",n,r); + sqlite3AtoF(zBuf, &r); + sqlite3_result_double(context, r); +} + +/* +** Allocate nByte bytes of space using sqlite3_malloc(). If the +** allocation fails, call sqlite3_result_error_nomem() to notify +** the database handle that malloc() has failed. +*/ +static void *contextMalloc(sqlite3_context *context, int nByte){ + char *z = sqlite3_malloc(nByte); + if( !z && nByte>0 ){ + sqlite3_result_error_nomem(context); + } + return z; +} + +/* +** Implementation of the upper() and lower() SQL functions. +*/ +static void upperFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ + char *z1; + const char *z2; + int i, n; + if( argc<1 || SQLITE_NULL==sqlite3_value_type(argv[0]) ) return; + z2 = (char*)sqlite3_value_text(argv[0]); + n = sqlite3_value_bytes(argv[0]); + /* Verify that the call to _bytes() does not invalidate the _text() pointer */ + assert( z2==(char*)sqlite3_value_text(argv[0]) ); + if( z2 ){ + z1 = contextMalloc(context, n+1); + if( z1 ){ + memcpy(z1, z2, n+1); + for(i=0; z1[i]; i++){ + z1[i] = toupper(z1[i]); + } + sqlite3_result_text(context, z1, -1, sqlite3_free); + } + } +} +static void lowerFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ + char *z1; + const char *z2; + int i, n; + if( argc<1 || SQLITE_NULL==sqlite3_value_type(argv[0]) ) return; + z2 = (char*)sqlite3_value_text(argv[0]); + n = sqlite3_value_bytes(argv[0]); + /* Verify that the call to _bytes() does not invalidate the _text() pointer */ + assert( z2==(char*)sqlite3_value_text(argv[0]) ); + if( z2 ){ + z1 = contextMalloc(context, n+1); + if( z1 ){ + memcpy(z1, z2, n+1); + for(i=0; z1[i]; i++){ + z1[i] = tolower(z1[i]); + } + sqlite3_result_text(context, z1, -1, sqlite3_free); + } + } +} + +/* +** Implementation of the IFNULL(), NVL(), and COALESCE() functions. +** All three do the same thing. They return the first non-NULL +** argument. 
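Both COALESCE() and IFNULL() resolve to ifnullFunc(), which simply hands back the first argument whose type is not SQLITE_NULL. A short sketch of the visible behaviour, not part of this patch; print_row() is an invented sqlite3_exec() callback.

    #include <stdio.h>
    #include <sqlite3.h>

    /* sqlite3_exec() callback: print each column of the result row. */
    static int print_row(void *notUsed, int nCol, char **azVal, char **azCol){
      int i;
      for(i=0; i<nCol; i++){
        printf("%s = %s\n", azCol[i], azVal[i] ? azVal[i] : "NULL");
      }
      return 0;
    }

    int main(void){
      sqlite3 *db;
      sqlite3_open(":memory:", &db);
      sqlite3_exec(db,
          "SELECT coalesce(NULL, NULL, 'third') AS c,"  /* 'third': first non-NULL */
          "       ifnull(NULL, 42)              AS i",  /* 42                      */
          print_row, 0, 0);
      sqlite3_close(db);
      return 0;
    }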
+*/ +static void ifnullFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + int i; + for(i=0; iSQLITE_MAX_LENGTH ){ + sqlite3_result_error_toobig(context); + return; + } + p = contextMalloc(context, n); + if( p ){ + sqlite3Randomness(n, p); + sqlite3_result_blob(context, (char*)p, n, sqlite3_free); + } +} + +/* +** Implementation of the last_insert_rowid() SQL function. The return +** value is the same as the sqlite3_last_insert_rowid() API function. +*/ +static void last_insert_rowid( + sqlite3_context *context, + int arg, + sqlite3_value **argv +){ + sqlite3 *db = sqlite3_user_data(context); + sqlite3_result_int64(context, sqlite3_last_insert_rowid(db)); +} + +/* +** Implementation of the changes() SQL function. The return value is the +** same as the sqlite3_changes() API function. +*/ +static void changes( + sqlite3_context *context, + int arg, + sqlite3_value **argv +){ + sqlite3 *db = sqlite3_user_data(context); + sqlite3_result_int(context, sqlite3_changes(db)); +} + +/* +** Implementation of the total_changes() SQL function. The return value is +** the same as the sqlite3_total_changes() API function. +*/ +static void total_changes( + sqlite3_context *context, + int arg, + sqlite3_value **argv +){ + sqlite3 *db = sqlite3_user_data(context); + sqlite3_result_int(context, sqlite3_total_changes(db)); +} + +/* +** A structure defining how to do GLOB-style comparisons. +*/ +struct compareInfo { + u8 matchAll; + u8 matchOne; + u8 matchSet; + u8 noCase; +}; + +static const struct compareInfo globInfo = { '*', '?', '[', 0 }; +/* The correct SQL-92 behavior is for the LIKE operator to ignore +** case. Thus 'a' LIKE 'A' would be true. */ +static const struct compareInfo likeInfoNorm = { '%', '_', 0, 1 }; +/* If SQLITE_CASE_SENSITIVE_LIKE is defined, then the LIKE operator +** is case sensitive causing 'a' LIKE 'A' to be false */ +static const struct compareInfo likeInfoAlt = { '%', '_', 0, 0 }; + +/* +** Compare two UTF-8 strings for equality where the first string can +** potentially be a "glob" expression. Return true (1) if they +** are the same and false (0) if they are different. +** +** Globbing rules: +** +** '*' Matches any sequence of zero or more characters. +** +** '?' Matches exactly one character. +** +** [...] Matches one character from the enclosed list of +** characters. +** +** [^...] Matches one character not in the enclosed list. +** +** With the [...] and [^...] matching, a ']' character can be included +** in the list by making it the first character after '[' or '^'. A +** range of characters can be specified using '-'. Example: +** "[a-z]" matches any single lower-case letter. To match a '-', make +** it the last character in the list. +** +** This routine is usually quick, but can be N**2 in the worst case. +** +** Hints: to match '*' or '?', put them in "[]". 
Like this: +** +** abc[*]xyz Matches "abc*xyz" only +*/ +static int patternCompare( + const u8 *zPattern, /* The glob pattern */ + const u8 *zString, /* The string to compare against the glob */ + const struct compareInfo *pInfo, /* Information about how to do the compare */ + const int esc /* The escape character */ +){ + int c, c2; + int invert; + int seen; + u8 matchOne = pInfo->matchOne; + u8 matchAll = pInfo->matchAll; + u8 matchSet = pInfo->matchSet; + u8 noCase = pInfo->noCase; + int prevEscape = 0; /* True if the previous character was 'escape' */ + + while( (c = sqlite3Utf8Read(zPattern,0,&zPattern))!=0 ){ + if( !prevEscape && c==matchAll ){ + while( (c=sqlite3Utf8Read(zPattern,0,&zPattern)) == matchAll + || c == matchOne ){ + if( c==matchOne && sqlite3Utf8Read(zString, 0, &zString)==0 ){ + return 0; + } + } + if( c==0 ){ + return 1; + }else if( c==esc ){ + c = sqlite3Utf8Read(zPattern, 0, &zPattern); + if( c==0 ){ + return 0; + } + }else if( c==matchSet ){ + assert( esc==0 ); /* This is GLOB, not LIKE */ + assert( matchSet<0x80 ); /* '[' is a single-byte character */ + while( *zString && patternCompare(&zPattern[-1],zString,pInfo,esc)==0 ){ + SQLITE_SKIP_UTF8(zString); + } + return *zString!=0; + } + while( (c2 = sqlite3Utf8Read(zString,0,&zString))!=0 ){ + if( noCase ){ + c2 = c2<0x80 ? sqlite3UpperToLower[c2] : c2; + c = c<0x80 ? sqlite3UpperToLower[c] : c; + while( c2 != 0 && c2 != c ){ + c2 = sqlite3Utf8Read(zString, 0, &zString); + if( c2<0x80 ) c2 = sqlite3UpperToLower[c2]; + } + }else{ + while( c2 != 0 && c2 != c ){ + c2 = sqlite3Utf8Read(zString, 0, &zString); + } + } + if( c2==0 ) return 0; + if( patternCompare(zPattern,zString,pInfo,esc) ) return 1; + } + return 0; + }else if( !prevEscape && c==matchOne ){ + if( sqlite3Utf8Read(zString, 0, &zString)==0 ){ + return 0; + } + }else if( c==matchSet ){ + int prior_c = 0; + assert( esc==0 ); /* This only occurs for GLOB, not LIKE */ + seen = 0; + invert = 0; + c = sqlite3Utf8Read(zString, 0, &zString); + if( c==0 ) return 0; + c2 = sqlite3Utf8Read(zPattern, 0, &zPattern); + if( c2=='^' ){ + invert = 1; + c2 = sqlite3Utf8Read(zPattern, 0, &zPattern); + } + if( c2==']' ){ + if( c==']' ) seen = 1; + c2 = sqlite3Utf8Read(zPattern, 0, &zPattern); + } + while( c2 && c2!=']' ){ + if( c2=='-' && zPattern[0]!=']' && zPattern[0]!=0 && prior_c>0 ){ + c2 = sqlite3Utf8Read(zPattern, 0, &zPattern); + if( c>=prior_c && c<=c2 ) seen = 1; + prior_c = 0; + }else{ + if( c==c2 ){ + seen = 1; + } + prior_c = c2; + } + c2 = sqlite3Utf8Read(zPattern, 0, &zPattern); + } + if( c2==0 || (seen ^ invert)==0 ){ + return 0; + } + }else if( esc==c && !prevEscape ){ + prevEscape = 1; + }else{ + c2 = sqlite3Utf8Read(zString, 0, &zString); + if( noCase ){ + c = c<0x80 ? sqlite3UpperToLower[c] : c; + c2 = c2<0x80 ? sqlite3UpperToLower[c2] : c2; + } + if( c!=c2 ){ + return 0; + } + prevEscape = 0; + } + } + return *zString==0; +} + +/* +** Count the number of times that the LIKE operator (or GLOB which is +** just a variation of LIKE) gets called. This is used for testing +** only. +*/ +#ifdef SQLITE_TEST +int sqlite3_like_count = 0; +#endif + + +/* +** Implementation of the like() SQL function. This function implements +** the build-in LIKE operator. The first argument to the function is the +** pattern and the second argument is the string. So, the SQL statements: +** +** A LIKE B +** +** is implemented as like(B,A). +** +** This same function (with a different compareInfo structure) computes +** the GLOB operator. 
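Because likeFunc() serves both operators with only the compareInfo user data changed, LIKE folds case by default while GLOB does not, and LIKE additionally honours an ESCAPE character. A quick sketch of the observable differences, not part of this patch; show() is an invented helper.

    #include <stdio.h>
    #include <sqlite3.h>

    /* Evaluate a boolean expression and print the 0/1 result. */
    static void show(sqlite3 *db, const char *zSql){
      sqlite3_stmt *pStmt = 0;
      if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0)==SQLITE_OK
       && sqlite3_step(pStmt)==SQLITE_ROW ){
        printf("%-40s -> %d\n", zSql, sqlite3_column_int(pStmt, 0));
      }
      sqlite3_finalize(pStmt);
    }

    int main(void){
      sqlite3 *db;
      sqlite3_open(":memory:", &db);
      show(db, "SELECT 'abc' LIKE 'ABC'");               /* 1: LIKE ignores case      */
      show(db, "SELECT 'abc' GLOB 'ABC'");               /* 0: GLOB is case sensitive */
      show(db, "SELECT 'abc' GLOB 'a[b]c'");             /* 1: character class        */
      show(db, "SELECT '50%' LIKE '50\\%' ESCAPE '\\'"); /* 1: escaped literal '%'    */
      sqlite3_close(db);
      return 0;
    }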
+*/ +static void likeFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + const unsigned char *zA, *zB; + int escape = 0; + + zB = sqlite3_value_text(argv[0]); + zA = sqlite3_value_text(argv[1]); + + /* Limit the length of the LIKE or GLOB pattern to avoid problems + ** of deep recursion and N*N behavior in patternCompare(). + */ + if( sqlite3_value_bytes(argv[0])>SQLITE_MAX_LIKE_PATTERN_LENGTH ){ + sqlite3_result_error(context, "LIKE or GLOB pattern too complex", -1); + return; + } + assert( zB==sqlite3_value_text(argv[0]) ); /* Encoding did not change */ + + if( argc==3 ){ + /* The escape character string must consist of a single UTF-8 character. + ** Otherwise, return an error. + */ + const unsigned char *zEsc = sqlite3_value_text(argv[2]); + if( zEsc==0 ) return; + if( sqlite3Utf8CharLen((char*)zEsc, -1)!=1 ){ + sqlite3_result_error(context, + "ESCAPE expression must be a single character", -1); + return; + } + escape = sqlite3Utf8Read(zEsc, 0, &zEsc); + } + if( zA && zB ){ + struct compareInfo *pInfo = sqlite3_user_data(context); +#ifdef SQLITE_TEST + sqlite3_like_count++; +#endif + + sqlite3_result_int(context, patternCompare(zB, zA, pInfo, escape)); + } +} + +/* +** Implementation of the NULLIF(x,y) function. The result is the first +** argument if the arguments are different. The result is NULL if the +** arguments are equal to each other. +*/ +static void nullifFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + CollSeq *pColl = sqlite3GetFuncCollSeq(context); + if( sqlite3MemCompare(argv[0], argv[1], pColl)!=0 ){ + sqlite3_result_value(context, argv[0]); + } +} + +/* +** Implementation of the VERSION(*) function. The result is the version +** of the SQLite library that is running. +*/ +static void versionFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + sqlite3_result_text(context, sqlite3_version, -1, SQLITE_STATIC); +} + +/* Array for converting from half-bytes (nybbles) into ASCII hex +** digits. */ +static const char hexdigits[] = { + '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' +}; + +/* +** EXPERIMENTAL - This is not an official function. The interface may +** change. This function may disappear. Do not write code that depends +** on this function. +** +** Implementation of the QUOTE() function. This function takes a single +** argument. If the argument is numeric, the return value is the same as +** the argument. If the argument is NULL, the return value is the string +** "NULL". Otherwise, the argument is enclosed in single quotes with +** single-quote escapes. 
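The three interesting cases for quote() are text with an embedded quote, NULL, and a blob. A small sketch, not part of this patch, using the public API.

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      sqlite3 *db;
      sqlite3_stmt *pStmt = 0;
      sqlite3_open(":memory:", &db);
      sqlite3_prepare_v2(db,
          "SELECT quote('it''s'), quote(NULL), quote(X'CAFE')", -1, &pStmt, 0);
      if( sqlite3_step(pStmt)==SQLITE_ROW ){
        printf("%s | %s | %s\n",
            (const char*)sqlite3_column_text(pStmt, 0),   /* 'it''s'  */
            (const char*)sqlite3_column_text(pStmt, 1),   /* NULL     */
            (const char*)sqlite3_column_text(pStmt, 2));  /* X'CAFE'  */
      }
      sqlite3_finalize(pStmt);
      sqlite3_close(db);
      return 0;
    }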
+*/ +static void quoteFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ + if( argc<1 ) return; + switch( sqlite3_value_type(argv[0]) ){ + case SQLITE_NULL: { + sqlite3_result_text(context, "NULL", 4, SQLITE_STATIC); + break; + } + case SQLITE_INTEGER: + case SQLITE_FLOAT: { + sqlite3_result_value(context, argv[0]); + break; + } + case SQLITE_BLOB: { + char *zText = 0; + char const *zBlob = sqlite3_value_blob(argv[0]); + int nBlob = sqlite3_value_bytes(argv[0]); + assert( zBlob==sqlite3_value_blob(argv[0]) ); /* No encoding change */ + + if( 2*nBlob+4>SQLITE_MAX_LENGTH ){ + sqlite3_result_error_toobig(context); + return; + } + zText = (char *)contextMalloc(context, (2*nBlob)+4); + if( zText ){ + int i; + for(i=0; i>4)&0x0F]; + zText[(i*2)+3] = hexdigits[(zBlob[i])&0x0F]; + } + zText[(nBlob*2)+2] = '\''; + zText[(nBlob*2)+3] = '\0'; + zText[0] = 'X'; + zText[1] = '\''; + sqlite3_result_text(context, zText, -1, SQLITE_TRANSIENT); + sqlite3_free(zText); + } + break; + } + case SQLITE_TEXT: { + int i,j; + u64 n; + const unsigned char *zArg = sqlite3_value_text(argv[0]); + char *z; + + if( zArg==0 ) return; + for(i=0, n=0; zArg[i]; i++){ if( zArg[i]=='\'' ) n++; } + if( i+n+3>SQLITE_MAX_LENGTH ){ + sqlite3_result_error_toobig(context); + return; + } + z = contextMalloc(context, i+n+3); + if( z ){ + z[0] = '\''; + for(i=0, j=1; zArg[i]; i++){ + z[j++] = zArg[i]; + if( zArg[i]=='\'' ){ + z[j++] = '\''; + } + } + z[j++] = '\''; + z[j] = 0; + sqlite3_result_text(context, z, j, sqlite3_free); + } + } + } +} + +/* +** The hex() function. Interpret the argument as a blob. Return +** a hexadecimal rendering as text. +*/ +static void hexFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + int i, n; + const unsigned char *pBlob; + char *zHex, *z; + assert( argc==1 ); + pBlob = sqlite3_value_blob(argv[0]); + n = sqlite3_value_bytes(argv[0]); + if( n*2+1>SQLITE_MAX_LENGTH ){ + sqlite3_result_error_toobig(context); + return; + } + assert( pBlob==sqlite3_value_blob(argv[0]) ); /* No encoding change */ + z = zHex = contextMalloc(context, n*2 + 1); + if( zHex ){ + for(i=0; i>4)&0xf]; + *(z++) = hexdigits[c&0xf]; + } + *z = 0; + sqlite3_result_text(context, zHex, n*2, sqlite3_free); + } +} + +/* +** The zeroblob(N) function returns a zero-filled blob of size N bytes. +*/ +static void zeroblobFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + i64 n; + assert( argc==1 ); + n = sqlite3_value_int64(argv[0]); + if( n>SQLITE_MAX_LENGTH ){ + sqlite3_result_error_toobig(context); + }else{ + sqlite3_result_zeroblob(context, n); + } +} + +/* +** The replace() function. Three arguments are all strings: call +** them A, B, and C. The result is also a string which is derived +** from A by replacing every occurance of B with C. The match +** must be exact. Collating sequences are not used. 
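replaceFunc() below looks for byte-exact occurrences only, so case differences and collating sequences never come into play. A minimal sketch, not part of this patch.

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      sqlite3 *db;
      sqlite3_stmt *pStmt = 0;
      sqlite3_open(":memory:", &db);
      /* Every exact occurrence of the pattern is rewritten; 'A' does not match 'a'. */
      sqlite3_prepare_v2(db,
          "SELECT replace('a-b-c', '-', '=>'), replace('abc', 'A', 'x')",
          -1, &pStmt, 0);
      if( sqlite3_step(pStmt)==SQLITE_ROW ){
        printf("%s | %s\n",
            (const char*)sqlite3_column_text(pStmt, 0),   /* a=>b=>c         */
            (const char*)sqlite3_column_text(pStmt, 1));  /* abc (unchanged) */
      }
      sqlite3_finalize(pStmt);
      sqlite3_close(db);
      return 0;
    }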
+*/ +static void replaceFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + const unsigned char *zStr; /* The input string A */ + const unsigned char *zPattern; /* The pattern string B */ + const unsigned char *zRep; /* The replacement string C */ + unsigned char *zOut; /* The output */ + int nStr; /* Size of zStr */ + int nPattern; /* Size of zPattern */ + int nRep; /* Size of zRep */ + i64 nOut; /* Maximum size of zOut */ + int loopLimit; /* Last zStr[] that might match zPattern[] */ + int i, j; /* Loop counters */ + + assert( argc==3 ); + zStr = sqlite3_value_text(argv[0]); + if( zStr==0 ) return; + nStr = sqlite3_value_bytes(argv[0]); + assert( zStr==sqlite3_value_text(argv[0]) ); /* No encoding change */ + zPattern = sqlite3_value_text(argv[1]); + if( zPattern==0 || zPattern[0]==0 ) return; + nPattern = sqlite3_value_bytes(argv[1]); + assert( zPattern==sqlite3_value_text(argv[1]) ); /* No encoding change */ + zRep = sqlite3_value_text(argv[2]); + if( zRep==0 ) return; + nRep = sqlite3_value_bytes(argv[2]); + assert( zRep==sqlite3_value_text(argv[2]) ); + nOut = nStr + 1; + assert( nOut=SQLITE_MAX_LENGTH ){ + sqlite3_result_error_toobig(context); + sqlite3_free(zOut); + return; + } + zOld = zOut; + zOut = sqlite3_realloc(zOut, (int)nOut); + if( zOut==0 ){ + sqlite3_result_error_nomem(context); + sqlite3_free(zOld); + return; + } + memcpy(&zOut[j], zRep, nRep); + j += nRep; + i += nPattern-1; + } + } + assert( j+nStr-i+1==nOut ); + memcpy(&zOut[j], &zStr[i], nStr-i); + j += nStr - i; + assert( j<=nOut ); + zOut[j] = 0; + sqlite3_result_text(context, (char*)zOut, j, sqlite3_free); +} + +/* +** Implementation of the TRIM(), LTRIM(), and RTRIM() functions. +** The userdata is 0x1 for left trim, 0x2 for right trim, 0x3 for both. 
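ltrim(), rtrim() and trim() all funnel into trimFunc(); the user-data flag selects which side to strip, and the optional second argument supplies the character set, which otherwise defaults to a single space. A sketch, not part of this patch; cb() is an invented sqlite3_exec() callback.

    #include <stdio.h>
    #include <sqlite3.h>

    static int cb(void *p, int nCol, char **azVal, char **azCol){
      int i;
      for(i=0; i<nCol; i++) printf("[%s] ", azVal[i]);
      printf("\n");
      return 0;
    }

    int main(void){
      sqlite3 *db;
      sqlite3_open(":memory:", &db);
      sqlite3_exec(db,
          "SELECT ltrim('  x  '), rtrim('  x  '), trim('  x  '), trim('xxhixx','x')",
          cb, 0, 0);                        /* [x  ] [  x] [x] [hi] */
      sqlite3_close(db);
      return 0;
    }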
+*/ +static void trimFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + const unsigned char *zIn; /* Input string */ + const unsigned char *zCharSet; /* Set of characters to trim */ + int nIn; /* Number of bytes in input */ + int flags; /* 1: trimleft 2: trimright 3: trim */ + int i; /* Loop counter */ + unsigned char *aLen; /* Length of each character in zCharSet */ + const unsigned char **azChar; /* Individual characters in zCharSet */ + int nChar; /* Number of characters in zCharSet */ + + if( sqlite3_value_type(argv[0])==SQLITE_NULL ){ + return; + } + zIn = sqlite3_value_text(argv[0]); + if( zIn==0 ) return; + nIn = sqlite3_value_bytes(argv[0]); + assert( zIn==sqlite3_value_text(argv[0]) ); + if( argc==1 ){ + static const unsigned char lenOne[] = { 1 }; + static const unsigned char *azOne[] = { (u8*)" " }; + nChar = 1; + aLen = (u8*)lenOne; + azChar = azOne; + zCharSet = 0; + }else if( (zCharSet = sqlite3_value_text(argv[1]))==0 ){ + return; + }else{ + const unsigned char *z; + for(z=zCharSet, nChar=0; *z; nChar++){ + SQLITE_SKIP_UTF8(z); + } + if( nChar>0 ){ + azChar = contextMalloc(context, nChar*(sizeof(char*)+1)); + if( azChar==0 ){ + return; + } + aLen = (unsigned char*)&azChar[nChar]; + for(z=zCharSet, nChar=0; *z; nChar++){ + azChar[nChar] = z; + SQLITE_SKIP_UTF8(z); + aLen[nChar] = z - azChar[nChar]; + } + } + } + if( nChar>0 ){ + flags = (int)sqlite3_user_data(context); + if( flags & 1 ){ + while( nIn>0 ){ + int len; + for(i=0; i=nChar ) break; + zIn += len; + nIn -= len; + } + } + if( flags & 2 ){ + while( nIn>0 ){ + int len; + for(i=0; i=nChar ) break; + nIn -= len; + } + } + if( zCharSet ){ + sqlite3_free(azChar); + } + } + sqlite3_result_text(context, (char*)zIn, nIn, SQLITE_TRANSIENT); +} + +#ifdef SQLITE_SOUNDEX +/* +** Compute the soundex encoding of a word. +*/ +static void soundexFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + char zResult[8]; + const u8 *zIn; + int i, j; + static const unsigned char iCode[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 2, 3, 0, 1, 2, 0, 0, 2, 2, 4, 5, 5, 0, + 1, 2, 6, 2, 3, 0, 1, 0, 2, 0, 2, 0, 0, 0, 0, 0, + 0, 0, 1, 2, 3, 0, 1, 2, 0, 0, 2, 2, 4, 5, 5, 0, + 1, 2, 6, 2, 3, 0, 1, 0, 2, 0, 2, 0, 0, 0, 0, 0, + }; + assert( argc==1 ); + zIn = (u8*)sqlite3_value_text(argv[0]); + if( zIn==0 ) zIn = (u8*)""; + for(i=0; zIn[i] && !isalpha(zIn[i]); i++){} + if( zIn[i] ){ + u8 prevcode = iCode[zIn[i]&0x7f]; + zResult[0] = toupper(zIn[i]); + for(j=1; j<4 && zIn[i]; i++){ + int code = iCode[zIn[i]&0x7f]; + if( code>0 ){ + if( code!=prevcode ){ + prevcode = code; + zResult[j++] = code + '0'; + } + }else{ + prevcode = 0; + } + } + while( j<4 ){ + zResult[j++] = '0'; + } + zResult[j] = 0; + sqlite3_result_text(context, zResult, 4, SQLITE_TRANSIENT); + }else{ + sqlite3_result_text(context, "?000", 4, SQLITE_STATIC); + } +} +#endif + +#ifndef SQLITE_OMIT_LOAD_EXTENSION +/* +** A function that loads a shared-library extension then returns NULL. 
+*/ +static void loadExt(sqlite3_context *context, int argc, sqlite3_value **argv){ + const char *zFile = (const char *)sqlite3_value_text(argv[0]); + const char *zProc; + sqlite3 *db = sqlite3_user_data(context); + char *zErrMsg = 0; + + if( argc==2 ){ + zProc = (const char *)sqlite3_value_text(argv[1]); + }else{ + zProc = 0; + } + if( zFile && sqlite3_load_extension(db, zFile, zProc, &zErrMsg) ){ + sqlite3_result_error(context, zErrMsg, -1); + sqlite3_free(zErrMsg); + } +} +#endif + +#ifdef SQLITE_TEST +/* +** This function generates a string of random characters. Used for +** generating test data. +*/ +static void randStr(sqlite3_context *context, int argc, sqlite3_value **argv){ + static const unsigned char zSrc[] = + "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "0123456789" + ".-!,:*^+=_|?/<> "; + int iMin, iMax, n, r, i; + unsigned char zBuf[1000]; + + /* It used to be possible to call randstr() with any number of arguments, + ** but now it is registered with SQLite as requiring exactly 2. + */ + assert(argc==2); + + iMin = sqlite3_value_int(argv[0]); + if( iMin<0 ) iMin = 0; + if( iMin>=sizeof(zBuf) ) iMin = sizeof(zBuf)-1; + iMax = sqlite3_value_int(argv[1]); + if( iMax=sizeof(zBuf) ) iMax = sizeof(zBuf)-1; + n = iMin; + if( iMax>iMin ){ + sqlite3Randomness(sizeof(r), &r); + r &= 0x7fffffff; + n += r%(iMax + 1 - iMin); + } + assert( ncnt++; + if( type==SQLITE_INTEGER ){ + i64 v = sqlite3_value_int64(argv[0]); + p->rSum += v; + if( (p->approx|p->overflow)==0 ){ + i64 iNewSum = p->iSum + v; + int s1 = p->iSum >> (sizeof(i64)*8-1); + int s2 = v >> (sizeof(i64)*8-1); + int s3 = iNewSum >> (sizeof(i64)*8-1); + p->overflow = (s1&s2&~s3) | (~s1&~s2&s3); + p->iSum = iNewSum; + } + }else{ + p->rSum += sqlite3_value_double(argv[0]); + p->approx = 1; + } + } +} +static void sumFinalize(sqlite3_context *context){ + SumCtx *p; + p = sqlite3_aggregate_context(context, 0); + if( p && p->cnt>0 ){ + if( p->overflow ){ + sqlite3_result_error(context,"integer overflow",-1); + }else if( p->approx ){ + sqlite3_result_double(context, p->rSum); + }else{ + sqlite3_result_int64(context, p->iSum); + } + } +} +static void avgFinalize(sqlite3_context *context){ + SumCtx *p; + p = sqlite3_aggregate_context(context, 0); + if( p && p->cnt>0 ){ + sqlite3_result_double(context, p->rSum/(double)p->cnt); + } +} +static void totalFinalize(sqlite3_context *context){ + SumCtx *p; + p = sqlite3_aggregate_context(context, 0); + sqlite3_result_double(context, p ? p->rSum : 0.0); +} + +/* +** The following structure keeps track of state information for the +** count() aggregate function. +*/ +typedef struct CountCtx CountCtx; +struct CountCtx { + i64 n; +}; + +/* +** Routines to implement the count() aggregate function. +*/ +static void countStep(sqlite3_context *context, int argc, sqlite3_value **argv){ + CountCtx *p; + p = sqlite3_aggregate_context(context, sizeof(*p)); + if( (argc==0 || SQLITE_NULL!=sqlite3_value_type(argv[0])) && p ){ + p->n++; + } +} +static void countFinalize(sqlite3_context *context){ + CountCtx *p; + p = sqlite3_aggregate_context(context, 0); + sqlite3_result_int64(context, p ? p->n : 0); +} + +/* +** Routines to implement min() and max() aggregate functions. 
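The aggregate code above has a few deliberate asymmetries: sum() stays in exact 64-bit integer arithmetic until a floating-point value appears and reports "integer overflow" rather than wrapping, total() always answers in floating point and yields 0.0 instead of NULL when there are no inputs, and count(x) skips NULLs while count(*) does not; the min()/max() aggregates introduced next keep the best row seen so far under the applicable collating sequence. A sketch of the visible results, not part of this patch; cb() is an invented callback.

    #include <stdio.h>
    #include <sqlite3.h>

    static int cb(void *p, int nCol, char **azVal, char **azCol){
      int i;
      for(i=0; i<nCol; i++){
        printf("%s=%s  ", azCol[i], azVal[i] ? azVal[i] : "NULL");
      }
      printf("\n");
      return 0;
    }

    int main(void){
      sqlite3 *db;
      sqlite3_open(":memory:", &db);
      sqlite3_exec(db,
          "CREATE TABLE t(x);"
          "INSERT INTO t VALUES(1); INSERT INTO t VALUES(2);"
          "INSERT INTO t VALUES(NULL);", 0, 0, 0);
      sqlite3_exec(db,
          "SELECT min(x) AS mn, max(x) AS mx, sum(x) AS s, total(x) AS tot,"
          "       avg(x) AS a, count(x) AS cx, count(*) AS n FROM t",
          cb, 0, 0);    /* mn=1  mx=2  s=3  tot=3.0  a=1.5  cx=2  n=3 */
      sqlite3_close(db);
      return 0;
    }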
+*/ +static void minmaxStep(sqlite3_context *context, int argc, sqlite3_value **argv){ + Mem *pArg = (Mem *)argv[0]; + Mem *pBest; + + if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; + pBest = (Mem *)sqlite3_aggregate_context(context, sizeof(*pBest)); + if( !pBest ) return; + + if( pBest->flags ){ + int max; + int cmp; + CollSeq *pColl = sqlite3GetFuncCollSeq(context); + /* This step function is used for both the min() and max() aggregates, + ** the only difference between the two being that the sense of the + ** comparison is inverted. For the max() aggregate, the + ** sqlite3_user_data() function returns (void *)-1. For min() it + ** returns (void *)db, where db is the sqlite3* database pointer. + ** Therefore the next statement sets variable 'max' to 1 for the max() + ** aggregate, or 0 for min(). + */ + max = sqlite3_user_data(context)!=0; + cmp = sqlite3MemCompare(pBest, pArg, pColl); + if( (max && cmp<0) || (!max && cmp>0) ){ + sqlite3VdbeMemCopy(pBest, pArg); + } + }else{ + sqlite3VdbeMemCopy(pBest, pArg); + } +} +static void minMaxFinalize(sqlite3_context *context){ + sqlite3_value *pRes; + pRes = (sqlite3_value *)sqlite3_aggregate_context(context, 0); + if( pRes ){ + if( pRes->flags ){ + sqlite3_result_value(context, pRes); + } + sqlite3VdbeMemRelease(pRes); + } +} + + +/* +** This function registered all of the above C functions as SQL +** functions. This should be the only routine in this file with +** external linkage. +*/ +void sqlite3RegisterBuiltinFunctions(sqlite3 *db){ + static const struct { + char *zName; + signed char nArg; + u8 argType; /* ff: db 1: 0, 2: 1, 3: 2,... N: N-1. */ + u8 eTextRep; /* 1: UTF-16. 0: UTF-8 */ + u8 needCollSeq; + void (*xFunc)(sqlite3_context*,int,sqlite3_value **); + } aFuncs[] = { + { "min", -1, 0, SQLITE_UTF8, 1, minmaxFunc }, + { "min", 0, 0, SQLITE_UTF8, 1, 0 }, + { "max", -1, 1, SQLITE_UTF8, 1, minmaxFunc }, + { "max", 0, 1, SQLITE_UTF8, 1, 0 }, + { "typeof", 1, 0, SQLITE_UTF8, 0, typeofFunc }, + { "length", 1, 0, SQLITE_UTF8, 0, lengthFunc }, + { "substr", 3, 0, SQLITE_UTF8, 0, substrFunc }, + { "abs", 1, 0, SQLITE_UTF8, 0, absFunc }, + { "round", 1, 0, SQLITE_UTF8, 0, roundFunc }, + { "round", 2, 0, SQLITE_UTF8, 0, roundFunc }, + { "upper", 1, 0, SQLITE_UTF8, 0, upperFunc }, + { "lower", 1, 0, SQLITE_UTF8, 0, lowerFunc }, + { "coalesce", -1, 0, SQLITE_UTF8, 0, ifnullFunc }, + { "coalesce", 0, 0, SQLITE_UTF8, 0, 0 }, + { "coalesce", 1, 0, SQLITE_UTF8, 0, 0 }, + { "hex", 1, 0, SQLITE_UTF8, 0, hexFunc }, + { "ifnull", 2, 0, SQLITE_UTF8, 1, ifnullFunc }, + { "random", -1, 0, SQLITE_UTF8, 0, randomFunc }, + { "randomblob", 1, 0, SQLITE_UTF8, 0, randomBlob }, + { "nullif", 2, 0, SQLITE_UTF8, 1, nullifFunc }, + { "sqlite_version", 0, 0, SQLITE_UTF8, 0, versionFunc}, + { "quote", 1, 0, SQLITE_UTF8, 0, quoteFunc }, + { "last_insert_rowid", 0, 0xff, SQLITE_UTF8, 0, last_insert_rowid }, + { "changes", 0, 0xff, SQLITE_UTF8, 0, changes }, + { "total_changes", 0, 0xff, SQLITE_UTF8, 0, total_changes }, + { "replace", 3, 0, SQLITE_UTF8, 0, replaceFunc }, + { "ltrim", 1, 1, SQLITE_UTF8, 0, trimFunc }, + { "ltrim", 2, 1, SQLITE_UTF8, 0, trimFunc }, + { "rtrim", 1, 2, SQLITE_UTF8, 0, trimFunc }, + { "rtrim", 2, 2, SQLITE_UTF8, 0, trimFunc }, + { "trim", 1, 3, SQLITE_UTF8, 0, trimFunc }, + { "trim", 2, 3, SQLITE_UTF8, 0, trimFunc }, + { "zeroblob", 1, 0, SQLITE_UTF8, 0, zeroblobFunc }, +#ifdef SQLITE_SOUNDEX + { "soundex", 1, 0, SQLITE_UTF8, 0, soundexFunc}, +#endif +#ifndef SQLITE_OMIT_LOAD_EXTENSION + { "load_extension", 1, 0xff, SQLITE_UTF8, 0, 
loadExt }, + { "load_extension", 2, 0xff, SQLITE_UTF8, 0, loadExt }, +#endif +#ifdef SQLITE_TEST + { "randstr", 2, 0, SQLITE_UTF8, 0, randStr }, + { "test_destructor", 1, 0xff, SQLITE_UTF8, 0, test_destructor}, + { "test_destructor_count", 0, 0, SQLITE_UTF8, 0, test_destructor_count}, + { "test_auxdata", -1, 0, SQLITE_UTF8, 0, test_auxdata}, + { "test_error", 1, 0, SQLITE_UTF8, 0, test_error}, +#endif + }; + static const struct { + char *zName; + signed char nArg; + u8 argType; + u8 needCollSeq; + void (*xStep)(sqlite3_context*,int,sqlite3_value**); + void (*xFinalize)(sqlite3_context*); + } aAggs[] = { + { "min", 1, 0, 1, minmaxStep, minMaxFinalize }, + { "max", 1, 1, 1, minmaxStep, minMaxFinalize }, + { "sum", 1, 0, 0, sumStep, sumFinalize }, + { "total", 1, 0, 0, sumStep, totalFinalize }, + { "avg", 1, 0, 0, sumStep, avgFinalize }, + { "count", 0, 0, 0, countStep, countFinalize }, + { "count", 1, 0, 0, countStep, countFinalize }, + }; + int i; + + for(i=0; ineedCollSeq = 1; + } + } + } +#ifndef SQLITE_OMIT_ALTERTABLE + sqlite3AlterFunctions(db); +#endif +#ifndef SQLITE_OMIT_PARSER + sqlite3AttachFunctions(db); +#endif + for(i=0; ineedCollSeq = 1; + } + } + } + sqlite3RegisterDateTimeFunctions(db); + if( !db->mallocFailed ){ + int rc = sqlite3_overload_function(db, "MATCH", 2); + assert( rc==SQLITE_NOMEM || rc==SQLITE_OK ); + if( rc==SQLITE_NOMEM ){ + db->mallocFailed = 1; + } + } +#ifdef SQLITE_SSE + (void)sqlite3SseFunctions(db); +#endif +#ifdef SQLITE_CASE_SENSITIVE_LIKE + sqlite3RegisterLikeFunctions(db, 1); +#else + sqlite3RegisterLikeFunctions(db, 0); +#endif +} + +/* +** Set the LIKEOPT flag on the 2-argument function with the given name. +*/ +static void setLikeOptFlag(sqlite3 *db, const char *zName, int flagVal){ + FuncDef *pDef; + pDef = sqlite3FindFunction(db, zName, strlen(zName), 2, SQLITE_UTF8, 0); + if( pDef ){ + pDef->flags = flagVal; + } +} + +/* +** Register the built-in LIKE and GLOB functions. The caseSensitive +** parameter determines whether or not the LIKE operator is case +** sensitive. GLOB is always case sensitive. +*/ +void sqlite3RegisterLikeFunctions(sqlite3 *db, int caseSensitive){ + struct compareInfo *pInfo; + if( caseSensitive ){ + pInfo = (struct compareInfo*)&likeInfoAlt; + }else{ + pInfo = (struct compareInfo*)&likeInfoNorm; + } + sqlite3CreateFunc(db, "like", 2, SQLITE_UTF8, pInfo, likeFunc, 0, 0); + sqlite3CreateFunc(db, "like", 3, SQLITE_UTF8, pInfo, likeFunc, 0, 0); + sqlite3CreateFunc(db, "glob", 2, SQLITE_UTF8, + (struct compareInfo*)&globInfo, likeFunc, 0,0); + setLikeOptFlag(db, "glob", SQLITE_FUNC_LIKE | SQLITE_FUNC_CASE); + setLikeOptFlag(db, "like", + caseSensitive ? (SQLITE_FUNC_LIKE | SQLITE_FUNC_CASE) : SQLITE_FUNC_LIKE); +} + +/* +** pExpr points to an expression which implements a function. If +** it is appropriate to apply the LIKE optimization to that function +** then set aWc[0] through aWc[2] to the wildcard characters and +** return TRUE. If the function is not a LIKE-style function then +** return FALSE. +*/ +int sqlite3IsLikeFunction(sqlite3 *db, Expr *pExpr, int *pIsNocase, char *aWc){ + FuncDef *pDef; + if( pExpr->op!=TK_FUNCTION || !pExpr->pList ){ + return 0; + } + if( pExpr->pList->nExpr!=2 ){ + return 0; + } + pDef = sqlite3FindFunction(db, (char*)pExpr->token.z, pExpr->token.n, 2, + SQLITE_UTF8, 0); + if( pDef==0 || (pDef->flags & SQLITE_FUNC_LIKE)==0 ){ + return 0; + } + + /* The memcpy() statement assumes that the wildcard characters are + ** the first three statements in the compareInfo structure. 
The + ** asserts() that follow verify that assumption + */ + memcpy(aWc, pDef->pUserData, 3); + assert( (char*)&likeInfoAlt == (char*)&likeInfoAlt.matchAll ); + assert( &((char*)&likeInfoAlt)[1] == (char*)&likeInfoAlt.matchOne ); + assert( &((char*)&likeInfoAlt)[2] == (char*)&likeInfoAlt.matchSet ); + *pIsNocase = (pDef->flags & SQLITE_FUNC_CASE)==0; + return 1; +} diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/hash.c b/libraries/sqlite/unix/sqlite-3.5.1/src/hash.c new file mode 100644 index 0000000000..a88d16b06b --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/hash.c @@ -0,0 +1,418 @@ +/* +** 2001 September 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This is the implementation of generic hash-tables +** used in SQLite. +** +** $Id: hash.c,v 1.24 2007/09/04 14:31:47 danielk1977 Exp $ +*/ +#include "sqliteInt.h" +#include + +/* Turn bulk memory into a hash table object by initializing the +** fields of the Hash structure. +** +** "pNew" is a pointer to the hash table that is to be initialized. +** keyClass is one of the constants SQLITE_HASH_INT, SQLITE_HASH_POINTER, +** SQLITE_HASH_BINARY, or SQLITE_HASH_STRING. The value of keyClass +** determines what kind of key the hash table will use. "copyKey" is +** true if the hash table should make its own private copy of keys and +** false if it should just use the supplied pointer. CopyKey only makes +** sense for SQLITE_HASH_STRING and SQLITE_HASH_BINARY and is ignored +** for other key classes. +*/ +void sqlite3HashInit(Hash *pNew, int keyClass, int copyKey){ + assert( pNew!=0 ); + assert( keyClass>=SQLITE_HASH_STRING && keyClass<=SQLITE_HASH_BINARY ); + pNew->keyClass = keyClass; +#if 0 + if( keyClass==SQLITE_HASH_POINTER || keyClass==SQLITE_HASH_INT ) copyKey = 0; +#endif + pNew->copyKey = copyKey; + pNew->first = 0; + pNew->count = 0; + pNew->htsize = 0; + pNew->ht = 0; +} + +/* Remove all entries from a hash table. Reclaim all memory. +** Call this routine to delete a hash table or to reset a hash table +** to the empty state. 
+*/ +void sqlite3HashClear(Hash *pH){ + HashElem *elem; /* For looping over all elements of the table */ + + assert( pH!=0 ); + elem = pH->first; + pH->first = 0; + if( pH->ht ) sqlite3_free(pH->ht); + pH->ht = 0; + pH->htsize = 0; + while( elem ){ + HashElem *next_elem = elem->next; + if( pH->copyKey && elem->pKey ){ + sqlite3_free(elem->pKey); + } + sqlite3_free(elem); + elem = next_elem; + } + pH->count = 0; +} + +#if 0 /* NOT USED */ +/* +** Hash and comparison functions when the mode is SQLITE_HASH_INT +*/ +static int intHash(const void *pKey, int nKey){ + return nKey ^ (nKey<<8) ^ (nKey>>8); +} +static int intCompare(const void *pKey1, int n1, const void *pKey2, int n2){ + return n2 - n1; +} +#endif + +#if 0 /* NOT USED */ +/* +** Hash and comparison functions when the mode is SQLITE_HASH_POINTER +*/ +static int ptrHash(const void *pKey, int nKey){ + uptr x = Addr(pKey); + return x ^ (x<<8) ^ (x>>8); +} +static int ptrCompare(const void *pKey1, int n1, const void *pKey2, int n2){ + if( pKey1==pKey2 ) return 0; + if( pKey1 0 ){ + h = (h<<3) ^ h ^ sqlite3UpperToLower[(unsigned char)*z++]; + nKey--; + } + return h & 0x7fffffff; +} +static int strCompare(const void *pKey1, int n1, const void *pKey2, int n2){ + if( n1!=n2 ) return 1; + return sqlite3StrNICmp((const char*)pKey1,(const char*)pKey2,n1); +} + +/* +** Hash and comparison functions when the mode is SQLITE_HASH_BINARY +*/ +static int binHash(const void *pKey, int nKey){ + int h = 0; + const char *z = (const char *)pKey; + while( nKey-- > 0 ){ + h = (h<<3) ^ h ^ *(z++); + } + return h & 0x7fffffff; +} +static int binCompare(const void *pKey1, int n1, const void *pKey2, int n2){ + if( n1!=n2 ) return 1; + return memcmp(pKey1,pKey2,n1); +} + +/* +** Return a pointer to the appropriate hash function given the key class. +** +** The C syntax in this function definition may be unfamilar to some +** programmers, so we provide the following additional explanation: +** +** The name of the function is "hashFunction". The function takes a +** single parameter "keyClass". The return value of hashFunction() +** is a pointer to another function. Specifically, the return value +** of hashFunction() is a pointer to a function that takes two parameters +** with types "const void*" and "int" and returns an "int". +*/ +static int (*hashFunction(int keyClass))(const void*,int){ +#if 0 /* HASH_INT and HASH_POINTER are never used */ + switch( keyClass ){ + case SQLITE_HASH_INT: return &intHash; + case SQLITE_HASH_POINTER: return &ptrHash; + case SQLITE_HASH_STRING: return &strHash; + case SQLITE_HASH_BINARY: return &binHash;; + default: break; + } + return 0; +#else + if( keyClass==SQLITE_HASH_STRING ){ + return &strHash; + }else{ + assert( keyClass==SQLITE_HASH_BINARY ); + return &binHash; + } +#endif +} + +/* +** Return a pointer to the appropriate hash function given the key class. +** +** For help in interpreted the obscure C code in the function definition, +** see the header comment on the previous function. 
+*/ +static int (*compareFunction(int keyClass))(const void*,int,const void*,int){ +#if 0 /* HASH_INT and HASH_POINTER are never used */ + switch( keyClass ){ + case SQLITE_HASH_INT: return &intCompare; + case SQLITE_HASH_POINTER: return &ptrCompare; + case SQLITE_HASH_STRING: return &strCompare; + case SQLITE_HASH_BINARY: return &binCompare; + default: break; + } + return 0; +#else + if( keyClass==SQLITE_HASH_STRING ){ + return &strCompare; + }else{ + assert( keyClass==SQLITE_HASH_BINARY ); + return &binCompare; + } +#endif +} + +/* Link an element into the hash table +*/ +static void insertElement( + Hash *pH, /* The complete hash table */ + struct _ht *pEntry, /* The entry into which pNew is inserted */ + HashElem *pNew /* The element to be inserted */ +){ + HashElem *pHead; /* First element already in pEntry */ + pHead = pEntry->chain; + if( pHead ){ + pNew->next = pHead; + pNew->prev = pHead->prev; + if( pHead->prev ){ pHead->prev->next = pNew; } + else { pH->first = pNew; } + pHead->prev = pNew; + }else{ + pNew->next = pH->first; + if( pH->first ){ pH->first->prev = pNew; } + pNew->prev = 0; + pH->first = pNew; + } + pEntry->count++; + pEntry->chain = pNew; +} + + +/* Resize the hash table so that it cantains "new_size" buckets. +** "new_size" must be a power of 2. The hash table might fail +** to resize if sqlite3_malloc() fails. +*/ +static void rehash(Hash *pH, int new_size){ + struct _ht *new_ht; /* The new hash table */ + HashElem *elem, *next_elem; /* For looping over existing elements */ + int (*xHash)(const void*,int); /* The hash function */ + + assert( (new_size & (new_size-1))==0 ); + + /* There is a call to sqlite3_malloc() inside rehash(). If there is + ** already an allocation at pH->ht, then if this malloc() fails it + ** is benign (since failing to resize a hash table is a performance + ** hit only, not a fatal error). + */ + sqlite3MallocBenignFailure(pH->htsize>0); + + new_ht = (struct _ht *)sqlite3MallocZero( new_size*sizeof(struct _ht) ); + if( new_ht==0 ) return; + if( pH->ht ) sqlite3_free(pH->ht); + pH->ht = new_ht; + pH->htsize = new_size; + xHash = hashFunction(pH->keyClass); + for(elem=pH->first, pH->first=0; elem; elem = next_elem){ + int h = (*xHash)(elem->pKey, elem->nKey) & (new_size-1); + next_elem = elem->next; + insertElement(pH, &new_ht[h], elem); + } +} + +/* This function (for internal use only) locates an element in an +** hash table that matches the given key. The hash for this key has +** already been computed and is passed as the 4th parameter. +*/ +static HashElem *findElementGivenHash( + const Hash *pH, /* The pH to be searched */ + const void *pKey, /* The key we are searching for */ + int nKey, + int h /* The hash for this key. */ +){ + HashElem *elem; /* Used to loop thru the element list */ + int count; /* Number of elements left to test */ + int (*xCompare)(const void*,int,const void*,int); /* comparison function */ + + if( pH->ht ){ + struct _ht *pEntry = &pH->ht[h]; + elem = pEntry->chain; + count = pEntry->count; + xCompare = compareFunction(pH->keyClass); + while( count-- && elem ){ + if( (*xCompare)(elem->pKey,elem->nKey,pKey,nKey)==0 ){ + return elem; + } + elem = elem->next; + } + } + return 0; +} + +/* Remove a single entry from the hash table given a pointer to that +** element and a hash on the element's key. 
+*/ +static void removeElementGivenHash( + Hash *pH, /* The pH containing "elem" */ + HashElem* elem, /* The element to be removed from the pH */ + int h /* Hash value for the element */ +){ + struct _ht *pEntry; + if( elem->prev ){ + elem->prev->next = elem->next; + }else{ + pH->first = elem->next; + } + if( elem->next ){ + elem->next->prev = elem->prev; + } + pEntry = &pH->ht[h]; + if( pEntry->chain==elem ){ + pEntry->chain = elem->next; + } + pEntry->count--; + if( pEntry->count<=0 ){ + pEntry->chain = 0; + } + if( pH->copyKey ){ + sqlite3_free(elem->pKey); + } + sqlite3_free( elem ); + pH->count--; + if( pH->count<=0 ){ + assert( pH->first==0 ); + assert( pH->count==0 ); + sqlite3HashClear(pH); + } +} + +/* Attempt to locate an element of the hash table pH with a key +** that matches pKey,nKey. Return a pointer to the corresponding +** HashElem structure for this element if it is found, or NULL +** otherwise. +*/ +HashElem *sqlite3HashFindElem(const Hash *pH, const void *pKey, int nKey){ + int h; /* A hash on key */ + HashElem *elem; /* The element that matches key */ + int (*xHash)(const void*,int); /* The hash function */ + + if( pH==0 || pH->ht==0 ) return 0; + xHash = hashFunction(pH->keyClass); + assert( xHash!=0 ); + h = (*xHash)(pKey,nKey); + assert( (pH->htsize & (pH->htsize-1))==0 ); + elem = findElementGivenHash(pH,pKey,nKey, h & (pH->htsize-1)); + return elem; +} + +/* Attempt to locate an element of the hash table pH with a key +** that matches pKey,nKey. Return the data for this element if it is +** found, or NULL if there is no match. +*/ +void *sqlite3HashFind(const Hash *pH, const void *pKey, int nKey){ + HashElem *elem; /* The element that matches key */ + elem = sqlite3HashFindElem(pH, pKey, nKey); + return elem ? elem->data : 0; +} + +/* Insert an element into the hash table pH. The key is pKey,nKey +** and the data is "data". +** +** If no element exists with a matching key, then a new +** element is created. A copy of the key is made if the copyKey +** flag is set. NULL is returned. +** +** If another element already exists with the same key, then the +** new data replaces the old data and the old data is returned. +** The key is not copied in this instance. If a malloc fails, then +** the new data is returned and the hash table is unchanged. +** +** If the "data" parameter to this function is NULL, then the +** element corresponding to "key" is removed from the hash table. 
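The insert/find/remove contract described above can be exercised directly, though only from inside the library: the Hash interface is internal (its declarations appear in hash.h later in this patch) and is not installed for applications. A sketch under that assumption; note that the key length of 6 includes the NUL terminator, as the SQLITE_HASH_STRING notes require.

    #include <stdio.h>
    #include "sqliteInt.h"   /* internal header; pulls in hash.h, builds in-tree only */

    int main(void){
      Hash h;
      void *pOld;

      sqlite3HashInit(&h, SQLITE_HASH_STRING, 1);       /* copyKey=1: keys duplicated */

      pOld = sqlite3HashInsert(&h, "alpha", 6, "one");  /* new key  -> returns NULL   */
      pOld = sqlite3HashInsert(&h, "alpha", 6, "two");  /* same key -> returns "one"  */
      printf("replaced: %s\n", (char*)pOld);

      /* String keys hash and compare case-insensitively. */
      printf("lookup:   %s\n", (char*)sqlite3HashFind(&h, "ALPHA", 6));  /* two */

      sqlite3HashInsert(&h, "alpha", 6, 0);             /* NULL data removes entry    */
      printf("gone:     %d\n", sqlite3HashFind(&h, "alpha", 6)==0);      /* 1   */

      sqlite3HashClear(&h);                             /* back to the empty state    */
      return 0;
    }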
+*/ +void *sqlite3HashInsert(Hash *pH, const void *pKey, int nKey, void *data){ + int hraw; /* Raw hash value of the key */ + int h; /* the hash of the key modulo hash table size */ + HashElem *elem; /* Used to loop thru the element list */ + HashElem *new_elem; /* New element added to the pH */ + int (*xHash)(const void*,int); /* The hash function */ + + assert( pH!=0 ); + xHash = hashFunction(pH->keyClass); + assert( xHash!=0 ); + hraw = (*xHash)(pKey, nKey); + assert( (pH->htsize & (pH->htsize-1))==0 ); + h = hraw & (pH->htsize-1); + elem = findElementGivenHash(pH,pKey,nKey,h); + if( elem ){ + void *old_data = elem->data; + if( data==0 ){ + removeElementGivenHash(pH,elem,h); + }else{ + elem->data = data; + if( !pH->copyKey ){ + elem->pKey = (void *)pKey; + } + assert(nKey==elem->nKey); + } + return old_data; + } + if( data==0 ) return 0; + new_elem = (HashElem*)sqlite3_malloc( sizeof(HashElem) ); + if( new_elem==0 ) return data; + if( pH->copyKey && pKey!=0 ){ + new_elem->pKey = sqlite3_malloc( nKey ); + if( new_elem->pKey==0 ){ + sqlite3_free(new_elem); + return data; + } + memcpy((void*)new_elem->pKey, pKey, nKey); + }else{ + new_elem->pKey = (void*)pKey; + } + new_elem->nKey = nKey; + pH->count++; + if( pH->htsize==0 ){ + rehash(pH,8); + if( pH->htsize==0 ){ + pH->count = 0; + if( pH->copyKey ){ + sqlite3_free(new_elem->pKey); + } + sqlite3_free(new_elem); + return data; + } + } + if( pH->count > pH->htsize ){ + rehash(pH,pH->htsize*2); + } + assert( pH->htsize>0 ); + assert( (pH->htsize & (pH->htsize-1))==0 ); + h = hraw & (pH->htsize-1); + insertElement(pH, &pH->ht[h], new_elem); + new_elem->data = data; + return 0; +} diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/hash.h b/libraries/sqlite/unix/sqlite-3.5.1/src/hash.h new file mode 100644 index 0000000000..e3274e9e44 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/hash.h @@ -0,0 +1,110 @@ +/* +** 2001 September 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This is the header file for the generic hash-table implemenation +** used in SQLite. +** +** $Id: hash.h,v 1.11 2007/09/04 14:31:47 danielk1977 Exp $ +*/ +#ifndef _SQLITE_HASH_H_ +#define _SQLITE_HASH_H_ + +/* Forward declarations of structures. */ +typedef struct Hash Hash; +typedef struct HashElem HashElem; + +/* A complete hash table is an instance of the following structure. +** The internals of this structure are intended to be opaque -- client +** code should not attempt to access or modify the fields of this structure +** directly. Change this structure only by using the routines below. +** However, many of the "procedures" and "functions" for modifying and +** accessing this structure are really macros, so we can't really make +** this structure opaque. 
+*/ +struct Hash { + char keyClass; /* SQLITE_HASH_INT, _POINTER, _STRING, _BINARY */ + char copyKey; /* True if copy of key made on insert */ + int count; /* Number of entries in this table */ + int htsize; /* Number of buckets in the hash table */ + HashElem *first; /* The first element of the array */ + struct _ht { /* the hash table */ + int count; /* Number of entries with this hash */ + HashElem *chain; /* Pointer to first entry with this hash */ + } *ht; +}; + +/* Each element in the hash table is an instance of the following +** structure. All elements are stored on a single doubly-linked list. +** +** Again, this structure is intended to be opaque, but it can't really +** be opaque because it is used by macros. +*/ +struct HashElem { + HashElem *next, *prev; /* Next and previous elements in the table */ + void *data; /* Data associated with this element */ + void *pKey; int nKey; /* Key associated with this element */ +}; + +/* +** There are 4 different modes of operation for a hash table: +** +** SQLITE_HASH_INT nKey is used as the key and pKey is ignored. +** +** SQLITE_HASH_POINTER pKey is used as the key and nKey is ignored. +** +** SQLITE_HASH_STRING pKey points to a string that is nKey bytes long +** (including the null-terminator, if any). Case +** is ignored in comparisons. +** +** SQLITE_HASH_BINARY pKey points to binary data nKey bytes long. +** memcmp() is used to compare keys. +** +** A copy of the key is made for SQLITE_HASH_STRING and SQLITE_HASH_BINARY +** if the copyKey parameter to HashInit is 1. +*/ +/* #define SQLITE_HASH_INT 1 // NOT USED */ +/* #define SQLITE_HASH_POINTER 2 // NOT USED */ +#define SQLITE_HASH_STRING 3 +#define SQLITE_HASH_BINARY 4 + +/* +** Access routines. To delete, insert a NULL pointer. +*/ +void sqlite3HashInit(Hash*, int keytype, int copyKey); +void *sqlite3HashInsert(Hash*, const void *pKey, int nKey, void *pData); +void *sqlite3HashFind(const Hash*, const void *pKey, int nKey); +HashElem *sqlite3HashFindElem(const Hash*, const void *pKey, int nKey); +void sqlite3HashClear(Hash*); + +/* +** Macros for looping over all elements of a hash table. The idiom is +** like this: +** +** Hash h; +** HashElem *p; +** ... +** for(p=sqliteHashFirst(&h); p; p=sqliteHashNext(p)){ +** SomeStructure *pData = sqliteHashData(p); +** // do something with pData +** } +*/ +#define sqliteHashFirst(H) ((H)->first) +#define sqliteHashNext(E) ((E)->next) +#define sqliteHashData(E) ((E)->data) +#define sqliteHashKey(E) ((E)->pKey) +#define sqliteHashKeysize(E) ((E)->nKey) + +/* +** Number of entries in a hash table +*/ +#define sqliteHashCount(H) ((H)->count) + +#endif /* _SQLITE_HASH_H_ */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/insert.c b/libraries/sqlite/unix/sqlite-3.5.1/src/insert.c new file mode 100644 index 0000000000..9d68c9539a --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/insert.c @@ -0,0 +1,1605 @@ +/* +** 2001 September 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains C code routines that are called by the parser +** to handle INSERT statements in SQLite. 
+** +** $Id: insert.c,v 1.192 2007/09/03 17:30:07 danielk1977 Exp $ +*/ +#include "sqliteInt.h" + +/* +** Set P3 of the most recently inserted opcode to a column affinity +** string for index pIdx. A column affinity string has one character +** for each column in the table, according to the affinity of the column: +** +** Character Column affinity +** ------------------------------ +** 'a' TEXT +** 'b' NONE +** 'c' NUMERIC +** 'd' INTEGER +** 'e' REAL +*/ +void sqlite3IndexAffinityStr(Vdbe *v, Index *pIdx){ + if( !pIdx->zColAff ){ + /* The first time a column affinity string for a particular index is + ** required, it is allocated and populated here. It is then stored as + ** a member of the Index structure for subsequent use. + ** + ** The column affinity string will eventually be deleted by + ** sqliteDeleteIndex() when the Index structure itself is cleaned + ** up. + */ + int n; + Table *pTab = pIdx->pTable; + sqlite3 *db = sqlite3VdbeDb(v); + pIdx->zColAff = (char *)sqlite3DbMallocZero(db, pIdx->nColumn+1); + if( !pIdx->zColAff ){ + return; + } + for(n=0; nnColumn; n++){ + pIdx->zColAff[n] = pTab->aCol[pIdx->aiColumn[n]].affinity; + } + pIdx->zColAff[pIdx->nColumn] = '\0'; + } + + sqlite3VdbeChangeP3(v, -1, pIdx->zColAff, 0); +} + +/* +** Set P3 of the most recently inserted opcode to a column affinity +** string for table pTab. A column affinity string has one character +** for each column indexed by the index, according to the affinity of the +** column: +** +** Character Column affinity +** ------------------------------ +** 'a' TEXT +** 'b' NONE +** 'c' NUMERIC +** 'd' INTEGER +** 'e' REAL +*/ +void sqlite3TableAffinityStr(Vdbe *v, Table *pTab){ + /* The first time a column affinity string for a particular table + ** is required, it is allocated and populated here. It is then + ** stored as a member of the Table structure for subsequent use. + ** + ** The column affinity string will eventually be deleted by + ** sqlite3DeleteTable() when the Table structure itself is cleaned up. + */ + if( !pTab->zColAff ){ + char *zColAff; + int i; + sqlite3 *db = sqlite3VdbeDb(v); + + zColAff = (char *)sqlite3DbMallocZero(db, pTab->nCol+1); + if( !zColAff ){ + return; + } + + for(i=0; inCol; i++){ + zColAff[i] = pTab->aCol[i].affinity; + } + zColAff[pTab->nCol] = '\0'; + + pTab->zColAff = zColAff; + } + + sqlite3VdbeChangeP3(v, -1, pTab->zColAff, 0); +} + +/* +** Return non-zero if SELECT statement p opens the table with rootpage +** iTab in database iDb. This is used to see if a statement of the form +** "INSERT INTO SELECT ..." can run without using temporary +** table for the results of the SELECT. +** +** No checking is done for sub-selects that are part of expressions. +*/ +static int selectReadsTable(Select *p, Schema *pSchema, int iTab){ + int i; + struct SrcList_item *pItem; + if( p->pSrc==0 ) return 0; + for(i=0, pItem=p->pSrc->a; ipSrc->nSrc; i++, pItem++){ + if( pItem->pSelect ){ + if( selectReadsTable(pItem->pSelect, pSchema, iTab) ) return 1; + }else{ + if( pItem->pTab->pSchema==pSchema && pItem->pTab->tnum==iTab ) return 1; + } + } + return 0; +} + +#ifndef SQLITE_OMIT_AUTOINCREMENT +/* +** Write out code to initialize the autoincrement logic. This code +** looks up the current autoincrement value in the sqlite_sequence +** table and stores that value in a memory cell. Code generated by +** autoIncStep() will keep that memory cell holding the largest +** rowid value. 
Code generated by autoIncEnd() will write the new +** largest value of the counter back into the sqlite_sequence table. +** +** This routine returns the index of the mem[] cell that contains +** the maximum rowid counter. +** +** Two memory cells are allocated. The next memory cell after the +** one returned holds the rowid in sqlite_sequence where we will +** write back the revised maximum rowid. +*/ +static int autoIncBegin( + Parse *pParse, /* Parsing context */ + int iDb, /* Index of the database holding pTab */ + Table *pTab /* The table we are writing to */ +){ + int memId = 0; + if( pTab->autoInc ){ + Vdbe *v = pParse->pVdbe; + Db *pDb = &pParse->db->aDb[iDb]; + int iCur = pParse->nTab; + int addr; + assert( v ); + addr = sqlite3VdbeCurrentAddr(v); + memId = pParse->nMem+1; + pParse->nMem += 2; + sqlite3OpenTable(pParse, iCur, iDb, pDb->pSchema->pSeqTab, OP_OpenRead); + sqlite3VdbeAddOp(v, OP_Rewind, iCur, addr+13); + sqlite3VdbeAddOp(v, OP_Column, iCur, 0); + sqlite3VdbeOp3(v, OP_String8, 0, 0, pTab->zName, 0); + sqlite3VdbeAddOp(v, OP_Ne, 0x100, addr+12); + sqlite3VdbeAddOp(v, OP_Rowid, iCur, 0); + sqlite3VdbeAddOp(v, OP_MemStore, memId-1, 1); + sqlite3VdbeAddOp(v, OP_Column, iCur, 1); + sqlite3VdbeAddOp(v, OP_MemStore, memId, 1); + sqlite3VdbeAddOp(v, OP_Goto, 0, addr+13); + sqlite3VdbeAddOp(v, OP_Next, iCur, addr+4); + sqlite3VdbeAddOp(v, OP_Close, iCur, 0); + } + return memId; +} + +/* +** Update the maximum rowid for an autoincrement calculation. +** +** This routine should be called when the top of the stack holds a +** new rowid that is about to be inserted. If that new rowid is +** larger than the maximum rowid in the memId memory cell, then the +** memory cell is updated. The stack is unchanged. +*/ +static void autoIncStep(Parse *pParse, int memId){ + if( memId>0 ){ + sqlite3VdbeAddOp(pParse->pVdbe, OP_MemMax, memId, 0); + } +} + +/* +** After doing one or more inserts, the maximum rowid is stored +** in mem[memId]. Generate code to write this value back into the +** the sqlite_sequence table. 
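At the SQL level, the code generated by autoIncBegin(), autoIncStep() and autoIncEnd() is what keeps AUTOINCREMENT rowids from being reused and maintains the per-table high-water mark in sqlite_sequence. A small sketch of that observable effect, not part of this patch; cb() is an invented callback.

    #include <stdio.h>
    #include <sqlite3.h>

    static int cb(void *p, int nCol, char **azVal, char **azCol){
      printf("%s -> seq=%s\n", azVal[0], azVal[1]);   /* table name, high-water mark */
      return 0;
    }

    int main(void){
      sqlite3 *db;
      sqlite3_open(":memory:", &db);
      sqlite3_exec(db,
          "CREATE TABLE t(id INTEGER PRIMARY KEY AUTOINCREMENT, v);"
          "INSERT INTO t(v) VALUES('a');"
          "INSERT INTO t(v) VALUES('b');"
          "DELETE FROM t;"
          "INSERT INTO t(v) VALUES('c');",    /* 'c' is assigned rowid 3, not 1 */
          0, 0, 0);
      sqlite3_exec(db, "SELECT name, seq FROM sqlite_sequence", cb, 0, 0);  /* t -> seq=3 */
      sqlite3_close(db);
      return 0;
    }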
+*/ +static void autoIncEnd( + Parse *pParse, /* The parsing context */ + int iDb, /* Index of the database holding pTab */ + Table *pTab, /* Table we are inserting into */ + int memId /* Memory cell holding the maximum rowid */ +){ + if( pTab->autoInc ){ + int iCur = pParse->nTab; + Vdbe *v = pParse->pVdbe; + Db *pDb = &pParse->db->aDb[iDb]; + int addr; + assert( v ); + addr = sqlite3VdbeCurrentAddr(v); + sqlite3OpenTable(pParse, iCur, iDb, pDb->pSchema->pSeqTab, OP_OpenWrite); + sqlite3VdbeAddOp(v, OP_MemLoad, memId-1, 0); + sqlite3VdbeAddOp(v, OP_NotNull, -1, addr+7); + sqlite3VdbeAddOp(v, OP_Pop, 1, 0); + sqlite3VdbeAddOp(v, OP_NewRowid, iCur, 0); + sqlite3VdbeOp3(v, OP_String8, 0, 0, pTab->zName, 0); + sqlite3VdbeAddOp(v, OP_MemLoad, memId, 0); + sqlite3VdbeAddOp(v, OP_MakeRecord, 2, 0); + sqlite3VdbeAddOp(v, OP_Insert, iCur, OPFLAG_APPEND); + sqlite3VdbeAddOp(v, OP_Close, iCur, 0); + } +} +#else +/* +** If SQLITE_OMIT_AUTOINCREMENT is defined, then the three routines +** above are all no-ops +*/ +# define autoIncBegin(A,B,C) (0) +# define autoIncStep(A,B) +# define autoIncEnd(A,B,C,D) +#endif /* SQLITE_OMIT_AUTOINCREMENT */ + + +/* Forward declaration */ +static int xferOptimization( + Parse *pParse, /* Parser context */ + Table *pDest, /* The table we are inserting into */ + Select *pSelect, /* A SELECT statement to use as the data source */ + int onError, /* How to handle constraint errors */ + int iDbDest /* The database of pDest */ +); + +/* +** This routine is call to handle SQL of the following forms: +** +** insert into TABLE (IDLIST) values(EXPRLIST) +** insert into TABLE (IDLIST) select +** +** The IDLIST following the table name is always optional. If omitted, +** then a list of all columns for the table is substituted. The IDLIST +** appears in the pColumn parameter. pColumn is NULL if IDLIST is omitted. +** +** The pList parameter holds EXPRLIST in the first form of the INSERT +** statement above, and pSelect is NULL. For the second form, pList is +** NULL and pSelect is a pointer to the select statement used to generate +** data for the insert. +** +** The code generated follows one of four templates. For a simple +** select with data coming from a VALUES clause, the code executes +** once straight down through. The template looks like this: +** +** open write cursor to
<table> and its indices
+**         puts VALUES clause expressions onto the stack
+**         write the resulting record into <table>
+**         cleanup
+**
+** The three remaining templates assume the statement is of the form
+**
+**   INSERT INTO <table> SELECT ...
+**
+** If the SELECT clause is of the restricted form "SELECT * FROM <table2>" -
+** in other words if the SELECT pulls all columns from a single table
+** and there is no WHERE or LIMIT or GROUP BY or ORDER BY clauses, and
+** if <table2> and <table1> are distinct tables but have identical
+** schemas, including all the same indices, then a special optimization
+** is invoked that copies raw records from <table2> over to <table1>.
+** See the xferOptimization() function for the implementation of this
+** template. This is the second template.
+**
+**         open a write cursor to <table>
+**         open read cursor on <table2>
+**         transfer all records in <table2> over to <table>
+**         close cursors
+**         foreach index on <table>
+**           open a write cursor on the <table> index
+**           open a read cursor on the corresponding <table2> index
+**           transfer all records from the read to the write cursors
+**           close cursors
+**         end foreach
+**
+** The third template is for when the second template does not apply
+** and the SELECT clause does not read from <table> at any time.
+** The generated code follows this template:
+**
+**         goto B
+**      A: setup for the SELECT
+**         loop over the rows in the SELECT
+**           gosub C
+**         end loop
+**         cleanup after the SELECT
+**         goto D
+**      B: open write cursor to <table> and its indices
+**         goto A
+**      C: insert the select result into <table>
+**         return
+**      D: cleanup
+**
+** The fourth template is used if the insert statement takes its
+** values from a SELECT but the data is being inserted into a table
+** that is also read as part of the SELECT. In the third form,
+** we have to use a intermediate table to store the results of
+** the select. The template is like this:
+**
+**         goto B
+**      A: setup for the SELECT
+**         loop over the tables in the SELECT
+**           gosub C
+**         end loop
+**         cleanup after the SELECT
+**         goto D
+**      C: insert the select result into the intermediate table
+**         return
+**      B: open a cursor to an intermediate table
+**         goto A
+**      D: open write cursor to <table> and its indices
+**         loop over the intermediate table
+**           transfer values form intermediate table into <table>
+** end the loop +** cleanup +*/ +void sqlite3Insert( + Parse *pParse, /* Parser context */ + SrcList *pTabList, /* Name of table into which we are inserting */ + ExprList *pList, /* List of values to be inserted */ + Select *pSelect, /* A SELECT statement to use as the data source */ + IdList *pColumn, /* Column names corresponding to IDLIST. */ + int onError /* How to handle constraint errors */ +){ + Table *pTab; /* The table to insert into */ + char *zTab; /* Name of the table into which we are inserting */ + const char *zDb; /* Name of the database holding this table */ + int i, j, idx; /* Loop counters */ + Vdbe *v; /* Generate code into this virtual machine */ + Index *pIdx; /* For looping over indices of the table */ + int nColumn; /* Number of columns in the data */ + int base = 0; /* VDBE Cursor number for pTab */ + int iCont=0,iBreak=0; /* Beginning and end of the loop over srcTab */ + sqlite3 *db; /* The main database structure */ + int keyColumn = -1; /* Column that is the INTEGER PRIMARY KEY */ + int endOfLoop; /* Label for the end of the insertion loop */ + int useTempTable = 0; /* Store SELECT results in intermediate table */ + int srcTab = 0; /* Data comes from this temporary cursor if >=0 */ + int iSelectLoop = 0; /* Address of code that implements the SELECT */ + int iCleanup = 0; /* Address of the cleanup code */ + int iInsertBlock = 0; /* Address of the subroutine used to insert data */ + int iCntMem = 0; /* Memory cell used for the row counter */ + int newIdx = -1; /* Cursor for the NEW table */ + Db *pDb; /* The database containing table being inserted into */ + int counterMem = 0; /* Memory cell holding AUTOINCREMENT counter */ + int appendFlag = 0; /* True if the insert is likely to be an append */ + int iDb; + + int nHidden = 0; + +#ifndef SQLITE_OMIT_TRIGGER + int isView; /* True if attempting to insert into a view */ + int triggers_exist = 0; /* True if there are FOR EACH ROW triggers */ +#endif + + db = pParse->db; + if( pParse->nErr || db->mallocFailed ){ + goto insert_cleanup; + } + + /* Locate the table into which we will be inserting new information. + */ + assert( pTabList->nSrc==1 ); + zTab = pTabList->a[0].zName; + if( zTab==0 ) goto insert_cleanup; + pTab = sqlite3SrcListLookup(pParse, pTabList); + if( pTab==0 ){ + goto insert_cleanup; + } + iDb = sqlite3SchemaToIndex(db, pTab->pSchema); + assert( iDbnDb ); + pDb = &db->aDb[iDb]; + zDb = pDb->zName; + if( sqlite3AuthCheck(pParse, SQLITE_INSERT, pTab->zName, 0, zDb) ){ + goto insert_cleanup; + } + + /* Figure out if we have any triggers and if the table being + ** inserted into is a view + */ +#ifndef SQLITE_OMIT_TRIGGER + triggers_exist = sqlite3TriggersExist(pParse, pTab, TK_INSERT, 0); + isView = pTab->pSelect!=0; +#else +# define triggers_exist 0 +# define isView 0 +#endif +#ifdef SQLITE_OMIT_VIEW +# undef isView +# define isView 0 +#endif + + /* Ensure that: + * (a) the table is not read-only, + * (b) that if it is a view then ON INSERT triggers exist + */ + if( sqlite3IsReadOnly(pParse, pTab, triggers_exist) ){ + goto insert_cleanup; + } + assert( pTab!=0 ); + + /* If pTab is really a view, make sure it has been initialized. + ** ViewGetColumnNames() is a no-op if pTab is not a view (or virtual + ** module table). 
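+  ** (An INSERT on a view only reaches this point when the view has INSERT
+  ** triggers; otherwise sqlite3IsReadOnly() above has already rejected
+  ** the statement.)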
+ */ + if( sqlite3ViewGetColumnNames(pParse, pTab) ){ + goto insert_cleanup; + } + + /* Allocate a VDBE + */ + v = sqlite3GetVdbe(pParse); + if( v==0 ) goto insert_cleanup; + if( pParse->nested==0 ) sqlite3VdbeCountChanges(v); + sqlite3BeginWriteOperation(pParse, pSelect || triggers_exist, iDb); + + /* if there are row triggers, allocate a temp table for new.* references. */ + if( triggers_exist ){ + newIdx = pParse->nTab++; + } + +#ifndef SQLITE_OMIT_XFER_OPT + /* If the statement is of the form + ** + ** INSERT INTO SELECT * FROM ; + ** + ** Then special optimizations can be applied that make the transfer + ** very fast and which reduce fragmentation of indices. + */ + if( pColumn==0 && xferOptimization(pParse, pTab, pSelect, onError, iDb) ){ + assert( !triggers_exist ); + assert( pList==0 ); + goto insert_cleanup; + } +#endif /* SQLITE_OMIT_XFER_OPT */ + + /* If this is an AUTOINCREMENT table, look up the sequence number in the + ** sqlite_sequence table and store it in memory cell counterMem. Also + ** remember the rowid of the sqlite_sequence table entry in memory cell + ** counterRowid. + */ + counterMem = autoIncBegin(pParse, iDb, pTab); + + /* Figure out how many columns of data are supplied. If the data + ** is coming from a SELECT statement, then this step also generates + ** all the code to implement the SELECT statement and invoke a subroutine + ** to process each row of the result. (Template 2.) If the SELECT + ** statement uses the the table that is being inserted into, then the + ** subroutine is also coded here. That subroutine stores the SELECT + ** results in a temporary table. (Template 3.) + */ + if( pSelect ){ + /* Data is coming from a SELECT. Generate code to implement that SELECT + */ + int rc, iInitCode; + iInitCode = sqlite3VdbeAddOp(v, OP_Goto, 0, 0); + iSelectLoop = sqlite3VdbeCurrentAddr(v); + iInsertBlock = sqlite3VdbeMakeLabel(v); + + /* Resolve the expressions in the SELECT statement and execute it. */ + rc = sqlite3Select(pParse, pSelect, SRT_Subroutine, iInsertBlock,0,0,0,0); + if( rc || pParse->nErr || db->mallocFailed ){ + goto insert_cleanup; + } + + iCleanup = sqlite3VdbeMakeLabel(v); + sqlite3VdbeAddOp(v, OP_Goto, 0, iCleanup); + assert( pSelect->pEList ); + nColumn = pSelect->pEList->nExpr; + + /* Set useTempTable to TRUE if the result of the SELECT statement + ** should be written into a temporary table. Set to FALSE if each + ** row of the SELECT can be written directly into the result table. + ** + ** A temp table must be used if the table being updated is also one + ** of the tables being read by the SELECT statement. Also use a + ** temp table in the case of row triggers. + */ + if( triggers_exist || selectReadsTable(pSelect,pTab->pSchema,pTab->tnum) ){ + useTempTable = 1; + } + + if( useTempTable ){ + /* Generate the subroutine that SELECT calls to process each row of + ** the result. Store the result in a temporary table + */ + srcTab = pParse->nTab++; + sqlite3VdbeResolveLabel(v, iInsertBlock); + sqlite3VdbeAddOp(v, OP_MakeRecord, nColumn, 0); + sqlite3VdbeAddOp(v, OP_NewRowid, srcTab, 0); + sqlite3VdbeAddOp(v, OP_Pull, 1, 0); + sqlite3VdbeAddOp(v, OP_Insert, srcTab, OPFLAG_APPEND); + sqlite3VdbeAddOp(v, OP_Return, 0, 0); + + /* The following code runs first because the GOTO at the very top + ** of the program jumps to it. Create the temporary table, then jump + ** back up and execute the SELECT code above. 
+ */ + sqlite3VdbeJumpHere(v, iInitCode); + sqlite3VdbeAddOp(v, OP_OpenEphemeral, srcTab, 0); + sqlite3VdbeAddOp(v, OP_SetNumColumns, srcTab, nColumn); + sqlite3VdbeAddOp(v, OP_Goto, 0, iSelectLoop); + sqlite3VdbeResolveLabel(v, iCleanup); + }else{ + sqlite3VdbeJumpHere(v, iInitCode); + } + }else{ + /* This is the case if the data for the INSERT is coming from a VALUES + ** clause + */ + NameContext sNC; + memset(&sNC, 0, sizeof(sNC)); + sNC.pParse = pParse; + srcTab = -1; + useTempTable = 0; + nColumn = pList ? pList->nExpr : 0; + for(i=0; ia[i].pExpr) ){ + goto insert_cleanup; + } + } + } + + /* Make sure the number of columns in the source data matches the number + ** of columns to be inserted into the table. + */ + if( IsVirtual(pTab) ){ + for(i=0; inCol; i++){ + nHidden += (IsHiddenColumn(&pTab->aCol[i]) ? 1 : 0); + } + } + if( pColumn==0 && nColumn && nColumn!=(pTab->nCol-nHidden) ){ + sqlite3ErrorMsg(pParse, + "table %S has %d columns but %d values were supplied", + pTabList, 0, pTab->nCol, nColumn); + goto insert_cleanup; + } + if( pColumn!=0 && nColumn!=pColumn->nId ){ + sqlite3ErrorMsg(pParse, "%d values for %d columns", nColumn, pColumn->nId); + goto insert_cleanup; + } + + /* If the INSERT statement included an IDLIST term, then make sure + ** all elements of the IDLIST really are columns of the table and + ** remember the column indices. + ** + ** If the table has an INTEGER PRIMARY KEY column and that column + ** is named in the IDLIST, then record in the keyColumn variable + ** the index into IDLIST of the primary key column. keyColumn is + ** the index of the primary key as it appears in IDLIST, not as + ** is appears in the original table. (The index of the primary + ** key in the original table is pTab->iPKey.) + */ + if( pColumn ){ + for(i=0; inId; i++){ + pColumn->a[i].idx = -1; + } + for(i=0; inId; i++){ + for(j=0; jnCol; j++){ + if( sqlite3StrICmp(pColumn->a[i].zName, pTab->aCol[j].zName)==0 ){ + pColumn->a[i].idx = j; + if( j==pTab->iPKey ){ + keyColumn = i; + } + break; + } + } + if( j>=pTab->nCol ){ + if( sqlite3IsRowid(pColumn->a[i].zName) ){ + keyColumn = i; + }else{ + sqlite3ErrorMsg(pParse, "table %S has no column named %s", + pTabList, 0, pColumn->a[i].zName); + pParse->nErr++; + goto insert_cleanup; + } + } + } + } + + /* If there is no IDLIST term but the table has an integer primary + ** key, the set the keyColumn variable to the primary key column index + ** in the original table definition. + */ + if( pColumn==0 && nColumn>0 ){ + keyColumn = pTab->iPKey; + } + + /* Open the temp table for FOR EACH ROW triggers + */ + if( triggers_exist ){ + sqlite3VdbeAddOp(v, OP_OpenPseudo, newIdx, 0); + sqlite3VdbeAddOp(v, OP_SetNumColumns, newIdx, pTab->nCol); + } + + /* Initialize the count of rows to be inserted + */ + if( db->flags & SQLITE_CountRows ){ + iCntMem = pParse->nMem++; + sqlite3VdbeAddOp(v, OP_MemInt, 0, iCntMem); + } + + /* Open tables and indices if there are no row triggers */ + if( !triggers_exist ){ + base = pParse->nTab; + sqlite3OpenTableAndIndices(pParse, pTab, base, OP_OpenWrite); + } + + /* If the data source is a temporary table, then we have to create + ** a loop because there might be multiple rows of data. If the data + ** source is a subroutine call from the SELECT statement, then we need + ** to launch the SELECT statement processing. 
+ */ + if( useTempTable ){ + iBreak = sqlite3VdbeMakeLabel(v); + sqlite3VdbeAddOp(v, OP_Rewind, srcTab, iBreak); + iCont = sqlite3VdbeCurrentAddr(v); + }else if( pSelect ){ + sqlite3VdbeAddOp(v, OP_Goto, 0, iSelectLoop); + sqlite3VdbeResolveLabel(v, iInsertBlock); + } + + /* Run the BEFORE and INSTEAD OF triggers, if there are any + */ + endOfLoop = sqlite3VdbeMakeLabel(v); + if( triggers_exist & TRIGGER_BEFORE ){ + + /* build the NEW.* reference row. Note that if there is an INTEGER + ** PRIMARY KEY into which a NULL is being inserted, that NULL will be + ** translated into a unique ID for the row. But on a BEFORE trigger, + ** we do not know what the unique ID will be (because the insert has + ** not happened yet) so we substitute a rowid of -1 + */ + if( keyColumn<0 ){ + sqlite3VdbeAddOp(v, OP_Integer, -1, 0); + }else if( useTempTable ){ + sqlite3VdbeAddOp(v, OP_Column, srcTab, keyColumn); + }else{ + assert( pSelect==0 ); /* Otherwise useTempTable is true */ + sqlite3ExprCode(pParse, pList->a[keyColumn].pExpr); + sqlite3VdbeAddOp(v, OP_NotNull, -1, sqlite3VdbeCurrentAddr(v)+3); + sqlite3VdbeAddOp(v, OP_Pop, 1, 0); + sqlite3VdbeAddOp(v, OP_Integer, -1, 0); + sqlite3VdbeAddOp(v, OP_MustBeInt, 0, 0); + } + + /* Cannot have triggers on a virtual table. If it were possible, + ** this block would have to account for hidden column. + */ + assert(!IsVirtual(pTab)); + + /* Create the new column data + */ + for(i=0; inCol; i++){ + if( pColumn==0 ){ + j = i; + }else{ + for(j=0; jnId; j++){ + if( pColumn->a[j].idx==i ) break; + } + } + if( pColumn && j>=pColumn->nId ){ + sqlite3ExprCode(pParse, pTab->aCol[i].pDflt); + }else if( useTempTable ){ + sqlite3VdbeAddOp(v, OP_Column, srcTab, j); + }else{ + assert( pSelect==0 ); /* Otherwise useTempTable is true */ + sqlite3ExprCodeAndCache(pParse, pList->a[j].pExpr); + } + } + sqlite3VdbeAddOp(v, OP_MakeRecord, pTab->nCol, 0); + + /* If this is an INSERT on a view with an INSTEAD OF INSERT trigger, + ** do not attempt any conversions before assembling the record. + ** If this is a real table, attempt conversions as required by the + ** table column affinities. + */ + if( !isView ){ + sqlite3TableAffinityStr(v, pTab); + } + sqlite3VdbeAddOp(v, OP_Insert, newIdx, 0); + + /* Fire BEFORE or INSTEAD OF triggers */ + if( sqlite3CodeRowTrigger(pParse, TK_INSERT, 0, TRIGGER_BEFORE, pTab, + newIdx, -1, onError, endOfLoop) ){ + goto insert_cleanup; + } + } + + /* If any triggers exists, the opening of tables and indices is deferred + ** until now. + */ + if( triggers_exist && !isView ){ + base = pParse->nTab; + sqlite3OpenTableAndIndices(pParse, pTab, base, OP_OpenWrite); + } + + /* Push the record number for the new entry onto the stack. The + ** record number is a randomly generate integer created by NewRowid + ** except when the table has an INTEGER PRIMARY KEY column, in which + ** case the record number is the same as that column. 
+ */ + if( !isView ){ + if( IsVirtual(pTab) ){ + /* The row that the VUpdate opcode will delete: none */ + sqlite3VdbeAddOp(v, OP_Null, 0, 0); + } + if( keyColumn>=0 ){ + if( useTempTable ){ + sqlite3VdbeAddOp(v, OP_Column, srcTab, keyColumn); + }else if( pSelect ){ + sqlite3VdbeAddOp(v, OP_Dup, nColumn - keyColumn - 1, 1); + }else{ + VdbeOp *pOp; + sqlite3ExprCode(pParse, pList->a[keyColumn].pExpr); + pOp = sqlite3VdbeGetOp(v, sqlite3VdbeCurrentAddr(v) - 1); + if( pOp && pOp->opcode==OP_Null ){ + appendFlag = 1; + pOp->opcode = OP_NewRowid; + pOp->p1 = base; + pOp->p2 = counterMem; + } + } + /* If the PRIMARY KEY expression is NULL, then use OP_NewRowid + ** to generate a unique primary key value. + */ + if( !appendFlag ){ + sqlite3VdbeAddOp(v, OP_NotNull, -1, sqlite3VdbeCurrentAddr(v)+3); + sqlite3VdbeAddOp(v, OP_Pop, 1, 0); + sqlite3VdbeAddOp(v, OP_NewRowid, base, counterMem); + sqlite3VdbeAddOp(v, OP_MustBeInt, 0, 0); + } + }else if( IsVirtual(pTab) ){ + sqlite3VdbeAddOp(v, OP_Null, 0, 0); + }else{ + sqlite3VdbeAddOp(v, OP_NewRowid, base, counterMem); + appendFlag = 1; + } + autoIncStep(pParse, counterMem); + + /* Push onto the stack, data for all columns of the new entry, beginning + ** with the first column. + */ + nHidden = 0; + for(i=0; inCol; i++){ + if( i==pTab->iPKey ){ + /* The value of the INTEGER PRIMARY KEY column is always a NULL. + ** Whenever this column is read, the record number will be substituted + ** in its place. So will fill this column with a NULL to avoid + ** taking up data space with information that will never be used. */ + sqlite3VdbeAddOp(v, OP_Null, 0, 0); + continue; + } + if( pColumn==0 ){ + if( IsHiddenColumn(&pTab->aCol[i]) ){ + assert( IsVirtual(pTab) ); + j = -1; + nHidden++; + }else{ + j = i - nHidden; + } + }else{ + for(j=0; jnId; j++){ + if( pColumn->a[j].idx==i ) break; + } + } + if( j<0 || nColumn==0 || (pColumn && j>=pColumn->nId) ){ + sqlite3ExprCode(pParse, pTab->aCol[i].pDflt); + }else if( useTempTable ){ + sqlite3VdbeAddOp(v, OP_Column, srcTab, j); + }else if( pSelect ){ + sqlite3VdbeAddOp(v, OP_Dup, i+nColumn-j+IsVirtual(pTab), 1); + }else{ + sqlite3ExprCode(pParse, pList->a[j].pExpr); + } + } + + /* Generate code to check constraints and generate index keys and + ** do the insertion. + */ +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( IsVirtual(pTab) ){ + pParse->pVirtualLock = pTab; + sqlite3VdbeOp3(v, OP_VUpdate, 1, pTab->nCol+2, + (const char*)pTab->pVtab, P3_VTAB); + }else +#endif + { + sqlite3GenerateConstraintChecks(pParse, pTab, base, 0, keyColumn>=0, + 0, onError, endOfLoop); + sqlite3CompleteInsertion(pParse, pTab, base, 0,0,0, + (triggers_exist & TRIGGER_AFTER)!=0 ? 
newIdx : -1, + appendFlag); + } + } + + /* Update the count of rows that are inserted + */ + if( (db->flags & SQLITE_CountRows)!=0 ){ + sqlite3VdbeAddOp(v, OP_MemIncr, 1, iCntMem); + } + + if( triggers_exist ){ + /* Close all tables opened */ + if( !isView ){ + sqlite3VdbeAddOp(v, OP_Close, base, 0); + for(idx=1, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, idx++){ + sqlite3VdbeAddOp(v, OP_Close, idx+base, 0); + } + } + + /* Code AFTER triggers */ + if( sqlite3CodeRowTrigger(pParse, TK_INSERT, 0, TRIGGER_AFTER, pTab, + newIdx, -1, onError, endOfLoop) ){ + goto insert_cleanup; + } + } + + /* The bottom of the loop, if the data source is a SELECT statement + */ + sqlite3VdbeResolveLabel(v, endOfLoop); + if( useTempTable ){ + sqlite3VdbeAddOp(v, OP_Next, srcTab, iCont); + sqlite3VdbeResolveLabel(v, iBreak); + sqlite3VdbeAddOp(v, OP_Close, srcTab, 0); + }else if( pSelect ){ + sqlite3VdbeAddOp(v, OP_Pop, nColumn, 0); + sqlite3VdbeAddOp(v, OP_Return, 0, 0); + sqlite3VdbeResolveLabel(v, iCleanup); + } + + if( !triggers_exist && !IsVirtual(pTab) ){ + /* Close all tables opened */ + sqlite3VdbeAddOp(v, OP_Close, base, 0); + for(idx=1, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, idx++){ + sqlite3VdbeAddOp(v, OP_Close, idx+base, 0); + } + } + + /* Update the sqlite_sequence table by storing the content of the + ** counter value in memory counterMem back into the sqlite_sequence + ** table. + */ + autoIncEnd(pParse, iDb, pTab, counterMem); + + /* + ** Return the number of rows inserted. If this routine is + ** generating code because of a call to sqlite3NestedParse(), do not + ** invoke the callback function. + */ + if( db->flags & SQLITE_CountRows && pParse->nested==0 && !pParse->trigStack ){ + sqlite3VdbeAddOp(v, OP_MemLoad, iCntMem, 0); + sqlite3VdbeAddOp(v, OP_Callback, 1, 0); + sqlite3VdbeSetNumCols(v, 1); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows inserted", P3_STATIC); + } + +insert_cleanup: + sqlite3SrcListDelete(pTabList); + sqlite3ExprListDelete(pList); + sqlite3SelectDelete(pSelect); + sqlite3IdListDelete(pColumn); +} + +/* +** Generate code to do a constraint check prior to an INSERT or an UPDATE. +** +** When this routine is called, the stack contains (from bottom to top) +** the following values: +** +** 1. The rowid of the row to be updated before the update. This +** value is omitted unless we are doing an UPDATE that involves a +** change to the record number. +** +** 2. The rowid of the row after the update. +** +** 3. The data in the first column of the entry after the update. +** +** i. Data from middle columns... +** +** N. The data in the last column of the entry after the update. +** +** The old rowid shown as entry (1) above is omitted unless both isUpdate +** and rowidChng are 1. isUpdate is true for UPDATEs and false for +** INSERTs and rowidChng is true if the record number is being changed. +** +** The code generated by this routine pushes additional entries onto +** the stack which are the keys for new index entries for the new record. +** The order of index keys is the same as the order of the indices on +** the pTable->pIndex list. A key is only created for index i if +** aIdxUsed!=0 and aIdxUsed[i]!=0. +** +** This routine also generates code to check constraints. NOT NULL, +** CHECK, and UNIQUE constraints are all checked. If a constraint fails, +** then the appropriate action is performed. There are five possible +** actions: ROLLBACK, ABORT, FAIL, REPLACE, and IGNORE. 
+** +** Constraint type Action What Happens +** --------------- ---------- ---------------------------------------- +** any ROLLBACK The current transaction is rolled back and +** sqlite3_exec() returns immediately with a +** return code of SQLITE_CONSTRAINT. +** +** any ABORT Back out changes from the current command +** only (do not do a complete rollback) then +** cause sqlite3_exec() to return immediately +** with SQLITE_CONSTRAINT. +** +** any FAIL Sqlite_exec() returns immediately with a +** return code of SQLITE_CONSTRAINT. The +** transaction is not rolled back and any +** prior changes are retained. +** +** any IGNORE The record number and data is popped from +** the stack and there is an immediate jump +** to label ignoreDest. +** +** NOT NULL REPLACE The NULL value is replace by the default +** value for that column. If the default value +** is NULL, the action is the same as ABORT. +** +** UNIQUE REPLACE The other row that conflicts with the row +** being inserted is removed. +** +** CHECK REPLACE Illegal. The results in an exception. +** +** Which action to take is determined by the overrideError parameter. +** Or if overrideError==OE_Default, then the pParse->onError parameter +** is used. Or if pParse->onError==OE_Default then the onError value +** for the constraint is used. +** +** The calling routine must open a read/write cursor for pTab with +** cursor number "base". All indices of pTab must also have open +** read/write cursors with cursor number base+i for the i-th cursor. +** Except, if there is no possibility of a REPLACE action then +** cursors do not need to be open for indices where aIdxUsed[i]==0. +** +** If the isUpdate flag is true, it means that the "base" cursor is +** initially pointing to an entry that is being updated. The isUpdate +** flag causes extra code to be generated so that the "base" cursor +** is still pointing at the same entry after the routine returns. +** Without the isUpdate flag, the "base" cursor might be moved. +*/ +void sqlite3GenerateConstraintChecks( + Parse *pParse, /* The parser context */ + Table *pTab, /* the table into which we are inserting */ + int base, /* Index of a read/write cursor pointing at pTab */ + char *aIdxUsed, /* Which indices are used. NULL means all are used */ + int rowidChng, /* True if the record number will change */ + int isUpdate, /* True for UPDATE, False for INSERT */ + int overrideError, /* Override onError to this if not OE_Default */ + int ignoreDest /* Jump to this label on an OE_Ignore resolution */ +){ + int i; + Vdbe *v; + int nCol; + int onError; + int addr; + int extra; + int iCur; + Index *pIdx; + int seenReplace = 0; + int jumpInst1=0, jumpInst2; + int hasTwoRowids = (isUpdate && rowidChng); + + v = sqlite3GetVdbe(pParse); + assert( v!=0 ); + assert( pTab->pSelect==0 ); /* This table is not a VIEW */ + nCol = pTab->nCol; + + /* Test all NOT NULL constraints. 
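+  **
+  ** For example (hypothetical column): a column declared
+  ** "b TEXT NOT NULL ON CONFLICT REPLACE DEFAULT 'none'" takes the
+  ** OE_Replace branch below and silently substitutes its default value,
+  ** while a plain NOT NULL column halts with SQLITE_CONSTRAINT.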
+ */ + for(i=0; iiPKey ){ + continue; + } + onError = pTab->aCol[i].notNull; + if( onError==OE_None ) continue; + if( overrideError!=OE_Default ){ + onError = overrideError; + }else if( onError==OE_Default ){ + onError = OE_Abort; + } + if( onError==OE_Replace && pTab->aCol[i].pDflt==0 ){ + onError = OE_Abort; + } + sqlite3VdbeAddOp(v, OP_Dup, nCol-1-i, 1); + addr = sqlite3VdbeAddOp(v, OP_NotNull, 1, 0); + assert( onError==OE_Rollback || onError==OE_Abort || onError==OE_Fail + || onError==OE_Ignore || onError==OE_Replace ); + switch( onError ){ + case OE_Rollback: + case OE_Abort: + case OE_Fail: { + char *zMsg = 0; + sqlite3VdbeAddOp(v, OP_Halt, SQLITE_CONSTRAINT, onError); + sqlite3SetString(&zMsg, pTab->zName, ".", pTab->aCol[i].zName, + " may not be NULL", (char*)0); + sqlite3VdbeChangeP3(v, -1, zMsg, P3_DYNAMIC); + break; + } + case OE_Ignore: { + sqlite3VdbeAddOp(v, OP_Pop, nCol+1+hasTwoRowids, 0); + sqlite3VdbeAddOp(v, OP_Goto, 0, ignoreDest); + break; + } + case OE_Replace: { + sqlite3ExprCode(pParse, pTab->aCol[i].pDflt); + sqlite3VdbeAddOp(v, OP_Push, nCol-i, 0); + break; + } + } + sqlite3VdbeJumpHere(v, addr); + } + + /* Test all CHECK constraints + */ +#ifndef SQLITE_OMIT_CHECK + if( pTab->pCheck && (pParse->db->flags & SQLITE_IgnoreChecks)==0 ){ + int allOk = sqlite3VdbeMakeLabel(v); + assert( pParse->ckOffset==0 ); + pParse->ckOffset = nCol; + sqlite3ExprIfTrue(pParse, pTab->pCheck, allOk, 1); + assert( pParse->ckOffset==nCol ); + pParse->ckOffset = 0; + onError = overrideError!=OE_Default ? overrideError : OE_Abort; + if( onError==OE_Ignore ){ + sqlite3VdbeAddOp(v, OP_Pop, nCol+1+hasTwoRowids, 0); + sqlite3VdbeAddOp(v, OP_Goto, 0, ignoreDest); + }else{ + sqlite3VdbeAddOp(v, OP_Halt, SQLITE_CONSTRAINT, onError); + } + sqlite3VdbeResolveLabel(v, allOk); + } +#endif /* !defined(SQLITE_OMIT_CHECK) */ + + /* If we have an INTEGER PRIMARY KEY, make sure the primary key + ** of the new record does not previously exist. Except, if this + ** is an UPDATE and the primary key is not changing, that is OK. + */ + if( rowidChng ){ + onError = pTab->keyConf; + if( overrideError!=OE_Default ){ + onError = overrideError; + }else if( onError==OE_Default ){ + onError = OE_Abort; + } + + if( isUpdate ){ + sqlite3VdbeAddOp(v, OP_Dup, nCol+1, 1); + sqlite3VdbeAddOp(v, OP_Dup, nCol+1, 1); + jumpInst1 = sqlite3VdbeAddOp(v, OP_Eq, 0, 0); + } + sqlite3VdbeAddOp(v, OP_Dup, nCol, 1); + jumpInst2 = sqlite3VdbeAddOp(v, OP_NotExists, base, 0); + switch( onError ){ + default: { + onError = OE_Abort; + /* Fall thru into the next case */ + } + case OE_Rollback: + case OE_Abort: + case OE_Fail: { + sqlite3VdbeOp3(v, OP_Halt, SQLITE_CONSTRAINT, onError, + "PRIMARY KEY must be unique", P3_STATIC); + break; + } + case OE_Replace: { + sqlite3GenerateRowIndexDelete(v, pTab, base, 0); + if( isUpdate ){ + sqlite3VdbeAddOp(v, OP_Dup, nCol+hasTwoRowids, 1); + sqlite3VdbeAddOp(v, OP_MoveGe, base, 0); + } + seenReplace = 1; + break; + } + case OE_Ignore: { + assert( seenReplace==0 ); + sqlite3VdbeAddOp(v, OP_Pop, nCol+1+hasTwoRowids, 0); + sqlite3VdbeAddOp(v, OP_Goto, 0, ignoreDest); + break; + } + } + sqlite3VdbeJumpHere(v, jumpInst2); + if( isUpdate ){ + sqlite3VdbeJumpHere(v, jumpInst1); + sqlite3VdbeAddOp(v, OP_Dup, nCol+1, 1); + sqlite3VdbeAddOp(v, OP_MoveGe, base, 0); + } + } + + /* Test all UNIQUE constraints by creating entries for each UNIQUE + ** index and making sure that duplicate entries do not already exist. + ** Add the new records to the indices as we go. 
+ */ + extra = -1; + for(iCur=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, iCur++){ + if( aIdxUsed && aIdxUsed[iCur]==0 ) continue; /* Skip unused indices */ + extra++; + + /* Create a key for accessing the index entry */ + sqlite3VdbeAddOp(v, OP_Dup, nCol+extra, 1); + for(i=0; inColumn; i++){ + int idx = pIdx->aiColumn[i]; + if( idx==pTab->iPKey ){ + sqlite3VdbeAddOp(v, OP_Dup, i+extra+nCol+1, 1); + }else{ + sqlite3VdbeAddOp(v, OP_Dup, i+extra+nCol-idx, 1); + } + } + jumpInst1 = sqlite3VdbeAddOp(v, OP_MakeIdxRec, pIdx->nColumn, 0); + sqlite3IndexAffinityStr(v, pIdx); + + /* Find out what action to take in case there is an indexing conflict */ + onError = pIdx->onError; + if( onError==OE_None ) continue; /* pIdx is not a UNIQUE index */ + if( overrideError!=OE_Default ){ + onError = overrideError; + }else if( onError==OE_Default ){ + onError = OE_Abort; + } + if( seenReplace ){ + if( onError==OE_Ignore ) onError = OE_Replace; + else if( onError==OE_Fail ) onError = OE_Abort; + } + + + /* Check to see if the new index entry will be unique */ + sqlite3VdbeAddOp(v, OP_Dup, extra+nCol+1+hasTwoRowids, 1); + jumpInst2 = sqlite3VdbeAddOp(v, OP_IsUnique, base+iCur+1, 0); + + /* Generate code that executes if the new index entry is not unique */ + assert( onError==OE_Rollback || onError==OE_Abort || onError==OE_Fail + || onError==OE_Ignore || onError==OE_Replace ); + switch( onError ){ + case OE_Rollback: + case OE_Abort: + case OE_Fail: { + int j, n1, n2; + char zErrMsg[200]; + sqlite3_snprintf(sizeof(zErrMsg), zErrMsg, + pIdx->nColumn>1 ? "columns " : "column "); + n1 = strlen(zErrMsg); + for(j=0; jnColumn && n1aCol[pIdx->aiColumn[j]].zName; + n2 = strlen(zCol); + if( j>0 ){ + sqlite3_snprintf(sizeof(zErrMsg)-n1, &zErrMsg[n1], ", "); + n1 += 2; + } + if( n1+n2>sizeof(zErrMsg)-30 ){ + sqlite3_snprintf(sizeof(zErrMsg)-n1, &zErrMsg[n1], "..."); + n1 += 3; + break; + }else{ + sqlite3_snprintf(sizeof(zErrMsg)-n1, &zErrMsg[n1], "%s", zCol); + n1 += n2; + } + } + sqlite3_snprintf(sizeof(zErrMsg)-n1, &zErrMsg[n1], + pIdx->nColumn>1 ? " are not unique" : " is not unique"); + sqlite3VdbeOp3(v, OP_Halt, SQLITE_CONSTRAINT, onError, zErrMsg, 0); + break; + } + case OE_Ignore: { + assert( seenReplace==0 ); + sqlite3VdbeAddOp(v, OP_Pop, nCol+extra+3+hasTwoRowids, 0); + sqlite3VdbeAddOp(v, OP_Goto, 0, ignoreDest); + break; + } + case OE_Replace: { + sqlite3GenerateRowDelete(pParse->db, v, pTab, base, 0); + if( isUpdate ){ + sqlite3VdbeAddOp(v, OP_Dup, nCol+extra+1+hasTwoRowids, 1); + sqlite3VdbeAddOp(v, OP_MoveGe, base, 0); + } + seenReplace = 1; + break; + } + } +#if NULL_DISTINCT_FOR_UNIQUE + sqlite3VdbeJumpHere(v, jumpInst1); +#endif + sqlite3VdbeJumpHere(v, jumpInst2); + } +} + +/* +** This routine generates code to finish the INSERT or UPDATE operation +** that was started by a prior call to sqlite3GenerateConstraintChecks. +** The stack must contain keys for all active indices followed by data +** and the rowid for the new entry. This routine creates the new +** entries in all indices and in the main table. +** +** The arguments to this routine should be the same as the first six +** arguments to sqlite3GenerateConstraintChecks. +*/ +void sqlite3CompleteInsertion( + Parse *pParse, /* The parser context */ + Table *pTab, /* the table into which we are inserting */ + int base, /* Index of a read/write cursor pointing at pTab */ + char *aIdxUsed, /* Which indices are used. 
NULL means all are used */ + int rowidChng, /* True if the record number will change */ + int isUpdate, /* True for UPDATE, False for INSERT */ + int newIdx, /* Index of NEW table for triggers. -1 if none */ + int appendBias /* True if this is likely to be an append */ +){ + int i; + Vdbe *v; + int nIdx; + Index *pIdx; + int pik_flags; + + v = sqlite3GetVdbe(pParse); + assert( v!=0 ); + assert( pTab->pSelect==0 ); /* This table is not a VIEW */ + for(nIdx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdx++){} + for(i=nIdx-1; i>=0; i--){ + if( aIdxUsed && aIdxUsed[i]==0 ) continue; + sqlite3VdbeAddOp(v, OP_IdxInsert, base+i+1, 0); + } + sqlite3VdbeAddOp(v, OP_MakeRecord, pTab->nCol, 0); + sqlite3TableAffinityStr(v, pTab); +#ifndef SQLITE_OMIT_TRIGGER + if( newIdx>=0 ){ + sqlite3VdbeAddOp(v, OP_Dup, 1, 0); + sqlite3VdbeAddOp(v, OP_Dup, 1, 0); + sqlite3VdbeAddOp(v, OP_Insert, newIdx, 0); + } +#endif + if( pParse->nested ){ + pik_flags = 0; + }else{ + pik_flags = OPFLAG_NCHANGE; + pik_flags |= (isUpdate?OPFLAG_ISUPDATE:OPFLAG_LASTROWID); + } + if( appendBias ){ + pik_flags |= OPFLAG_APPEND; + } + sqlite3VdbeAddOp(v, OP_Insert, base, pik_flags); + if( !pParse->nested ){ + sqlite3VdbeChangeP3(v, -1, pTab->zName, P3_STATIC); + } + + if( isUpdate && rowidChng ){ + sqlite3VdbeAddOp(v, OP_Pop, 1, 0); + } +} + +/* +** Generate code that will open cursors for a table and for all +** indices of that table. The "base" parameter is the cursor number used +** for the table. Indices are opened on subsequent cursors. +*/ +void sqlite3OpenTableAndIndices( + Parse *pParse, /* Parsing context */ + Table *pTab, /* Table to be opened */ + int base, /* Cursor number assigned to the table */ + int op /* OP_OpenRead or OP_OpenWrite */ +){ + int i; + int iDb; + Index *pIdx; + Vdbe *v; + + if( IsVirtual(pTab) ) return; + iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); + v = sqlite3GetVdbe(pParse); + assert( v!=0 ); + sqlite3OpenTable(pParse, base, iDb, pTab, op); + for(i=1, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){ + KeyInfo *pKey = sqlite3IndexKeyinfo(pParse, pIdx); + assert( pIdx->pSchema==pTab->pSchema ); + sqlite3VdbeAddOp(v, OP_Integer, iDb, 0); + VdbeComment((v, "# %s", pIdx->zName)); + sqlite3VdbeOp3(v, op, i+base, pIdx->tnum, (char*)pKey, P3_KEYINFO_HANDOFF); + } + if( pParse->nTab<=base+i ){ + pParse->nTab = base+i; + } +} + + +#ifdef SQLITE_TEST +/* +** The following global variable is incremented whenever the +** transfer optimization is used. This is used for testing +** purposes only - to make sure the transfer optimization really +** is happening when it is suppose to. +*/ +int sqlite3_xferopt_count; +#endif /* SQLITE_TEST */ + + +#ifndef SQLITE_OMIT_XFER_OPT +/* +** Check to collation names to see if they are compatible. +*/ +static int xferCompatibleCollation(const char *z1, const char *z2){ + if( z1==0 ){ + return z2==0; + } + if( z2==0 ){ + return 0; + } + return sqlite3StrICmp(z1, z2)==0; +} + + +/* +** Check to see if index pSrc is compatible as a source of data +** for index pDest in an insert transfer optimization. 
The rules +** for a compatible index: +** +** * The index is over the same set of columns +** * The same DESC and ASC markings occurs on all columns +** * The same onError processing (OE_Abort, OE_Ignore, etc) +** * The same collating sequence on each column +*/ +static int xferCompatibleIndex(Index *pDest, Index *pSrc){ + int i; + assert( pDest && pSrc ); + assert( pDest->pTable!=pSrc->pTable ); + if( pDest->nColumn!=pSrc->nColumn ){ + return 0; /* Different number of columns */ + } + if( pDest->onError!=pSrc->onError ){ + return 0; /* Different conflict resolution strategies */ + } + for(i=0; inColumn; i++){ + if( pSrc->aiColumn[i]!=pDest->aiColumn[i] ){ + return 0; /* Different columns indexed */ + } + if( pSrc->aSortOrder[i]!=pDest->aSortOrder[i] ){ + return 0; /* Different sort orders */ + } + if( pSrc->azColl[i]!=pDest->azColl[i] ){ + return 0; /* Different sort orders */ + } + } + + /* If no test above fails then the indices must be compatible */ + return 1; +} + +/* +** Attempt the transfer optimization on INSERTs of the form +** +** INSERT INTO tab1 SELECT * FROM tab2; +** +** This optimization is only attempted if +** +** (1) tab1 and tab2 have identical schemas including all the +** same indices and constraints +** +** (2) tab1 and tab2 are different tables +** +** (3) There must be no triggers on tab1 +** +** (4) The result set of the SELECT statement is "*" +** +** (5) The SELECT statement has no WHERE, HAVING, ORDER BY, GROUP BY, +** or LIMIT clause. +** +** (6) The SELECT statement is a simple (not a compound) select that +** contains only tab2 in its FROM clause +** +** This method for implementing the INSERT transfers raw records from +** tab2 over to tab1. The columns are not decoded. Raw records from +** the indices of tab2 are transfered to tab1 as well. In so doing, +** the resulting tab1 has much less fragmentation. +** +** This routine returns TRUE if the optimization is attempted. If any +** of the conditions above fail so that the optimization should not +** be attempted, then this routine returns FALSE. +*/ +static int xferOptimization( + Parse *pParse, /* Parser context */ + Table *pDest, /* The table we are inserting into */ + Select *pSelect, /* A SELECT statement to use as the data source */ + int onError, /* How to handle constraint errors */ + int iDbDest /* The database of pDest */ +){ + ExprList *pEList; /* The result set of the SELECT */ + Table *pSrc; /* The table in the FROM clause of SELECT */ + Index *pSrcIdx, *pDestIdx; /* Source and destination indices */ + struct SrcList_item *pItem; /* An element of pSelect->pSrc */ + int i; /* Loop counter */ + int iDbSrc; /* The database of pSrc */ + int iSrc, iDest; /* Cursors from source and destination */ + int addr1, addr2; /* Loop addresses */ + int emptyDestTest; /* Address of test for empty pDest */ + int emptySrcTest; /* Address of test for empty pSrc */ + Vdbe *v; /* The VDBE we are building */ + KeyInfo *pKey; /* Key information for an index */ + int counterMem; /* Memory register used by AUTOINC */ + int destHasUniqueIdx = 0; /* True if pDest has a UNIQUE index */ + + if( pSelect==0 ){ + return 0; /* Must be of the form INSERT INTO ... SELECT ... 
*/ + } + if( pDest->pTrigger ){ + return 0; /* tab1 must not have triggers */ + } +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( pDest->isVirtual ){ + return 0; /* tab1 must not be a virtual table */ + } +#endif + if( onError==OE_Default ){ + onError = OE_Abort; + } + if( onError!=OE_Abort && onError!=OE_Rollback ){ + return 0; /* Cannot do OR REPLACE or OR IGNORE or OR FAIL */ + } + assert(pSelect->pSrc); /* allocated even if there is no FROM clause */ + if( pSelect->pSrc->nSrc!=1 ){ + return 0; /* FROM clause must have exactly one term */ + } + if( pSelect->pSrc->a[0].pSelect ){ + return 0; /* FROM clause cannot contain a subquery */ + } + if( pSelect->pWhere ){ + return 0; /* SELECT may not have a WHERE clause */ + } + if( pSelect->pOrderBy ){ + return 0; /* SELECT may not have an ORDER BY clause */ + } + /* Do not need to test for a HAVING clause. If HAVING is present but + ** there is no ORDER BY, we will get an error. */ + if( pSelect->pGroupBy ){ + return 0; /* SELECT may not have a GROUP BY clause */ + } + if( pSelect->pLimit ){ + return 0; /* SELECT may not have a LIMIT clause */ + } + assert( pSelect->pOffset==0 ); /* Must be so if pLimit==0 */ + if( pSelect->pPrior ){ + return 0; /* SELECT may not be a compound query */ + } + if( pSelect->isDistinct ){ + return 0; /* SELECT may not be DISTINCT */ + } + pEList = pSelect->pEList; + assert( pEList!=0 ); + if( pEList->nExpr!=1 ){ + return 0; /* The result set must have exactly one column */ + } + assert( pEList->a[0].pExpr ); + if( pEList->a[0].pExpr->op!=TK_ALL ){ + return 0; /* The result set must be the special operator "*" */ + } + + /* At this point we have established that the statement is of the + ** correct syntactic form to participate in this optimization. Now + ** we have to check the semantics. + */ + pItem = pSelect->pSrc->a; + pSrc = sqlite3LocateTable(pParse, pItem->zName, pItem->zDatabase); + if( pSrc==0 ){ + return 0; /* FROM clause does not contain a real table */ + } + if( pSrc==pDest ){ + return 0; /* tab1 and tab2 may not be the same table */ + } +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( pSrc->isVirtual ){ + return 0; /* tab2 must not be a virtual table */ + } +#endif + if( pSrc->pSelect ){ + return 0; /* tab2 may not be a view */ + } + if( pDest->nCol!=pSrc->nCol ){ + return 0; /* Number of columns must be the same in tab1 and tab2 */ + } + if( pDest->iPKey!=pSrc->iPKey ){ + return 0; /* Both tables must have the same INTEGER PRIMARY KEY */ + } + for(i=0; inCol; i++){ + if( pDest->aCol[i].affinity!=pSrc->aCol[i].affinity ){ + return 0; /* Affinity must be the same on all columns */ + } + if( !xferCompatibleCollation(pDest->aCol[i].zColl, pSrc->aCol[i].zColl) ){ + return 0; /* Collating sequence must be the same on all columns */ + } + if( pDest->aCol[i].notNull && !pSrc->aCol[i].notNull ){ + return 0; /* tab2 must be NOT NULL if tab1 is */ + } + } + for(pDestIdx=pDest->pIndex; pDestIdx; pDestIdx=pDestIdx->pNext){ + if( pDestIdx->onError!=OE_None ){ + destHasUniqueIdx = 1; + } + for(pSrcIdx=pSrc->pIndex; pSrcIdx; pSrcIdx=pSrcIdx->pNext){ + if( xferCompatibleIndex(pDestIdx, pSrcIdx) ) break; + } + if( pSrcIdx==0 ){ + return 0; /* pDestIdx has no corresponding index in pSrc */ + } + } +#ifndef SQLITE_OMIT_CHECK + if( pDest->pCheck && !sqlite3ExprCompare(pSrc->pCheck, pDest->pCheck) ){ + return 0; /* Tables have different CHECK constraints. 
Ticket #2252 */ + } +#endif + + /* If we get this far, it means either: + ** + ** * We can always do the transfer if the table contains an + ** an integer primary key + ** + ** * We can conditionally do the transfer if the destination + ** table is empty. + */ +#ifdef SQLITE_TEST + sqlite3_xferopt_count++; +#endif + iDbSrc = sqlite3SchemaToIndex(pParse->db, pSrc->pSchema); + v = sqlite3GetVdbe(pParse); + sqlite3CodeVerifySchema(pParse, iDbSrc); + iSrc = pParse->nTab++; + iDest = pParse->nTab++; + counterMem = autoIncBegin(pParse, iDbDest, pDest); + sqlite3OpenTable(pParse, iDest, iDbDest, pDest, OP_OpenWrite); + if( (pDest->iPKey<0 && pDest->pIndex!=0) || destHasUniqueIdx ){ + /* If tables do not have an INTEGER PRIMARY KEY and there + ** are indices to be copied and the destination is not empty, + ** we have to disallow the transfer optimization because the + ** the rowids might change which will mess up indexing. + ** + ** Or if the destination has a UNIQUE index and is not empty, + ** we also disallow the transfer optimization because we cannot + ** insure that all entries in the union of DEST and SRC will be + ** unique. + */ + addr1 = sqlite3VdbeAddOp(v, OP_Rewind, iDest, 0); + emptyDestTest = sqlite3VdbeAddOp(v, OP_Goto, 0, 0); + sqlite3VdbeJumpHere(v, addr1); + }else{ + emptyDestTest = 0; + } + sqlite3OpenTable(pParse, iSrc, iDbSrc, pSrc, OP_OpenRead); + emptySrcTest = sqlite3VdbeAddOp(v, OP_Rewind, iSrc, 0); + if( pDest->iPKey>=0 ){ + addr1 = sqlite3VdbeAddOp(v, OP_Rowid, iSrc, 0); + sqlite3VdbeAddOp(v, OP_Dup, 0, 0); + addr2 = sqlite3VdbeAddOp(v, OP_NotExists, iDest, 0); + sqlite3VdbeOp3(v, OP_Halt, SQLITE_CONSTRAINT, onError, + "PRIMARY KEY must be unique", P3_STATIC); + sqlite3VdbeJumpHere(v, addr2); + autoIncStep(pParse, counterMem); + }else if( pDest->pIndex==0 ){ + addr1 = sqlite3VdbeAddOp(v, OP_NewRowid, iDest, 0); + }else{ + addr1 = sqlite3VdbeAddOp(v, OP_Rowid, iSrc, 0); + assert( pDest->autoInc==0 ); + } + sqlite3VdbeAddOp(v, OP_RowData, iSrc, 0); + sqlite3VdbeOp3(v, OP_Insert, iDest, + OPFLAG_NCHANGE|OPFLAG_LASTROWID|OPFLAG_APPEND, + pDest->zName, 0); + sqlite3VdbeAddOp(v, OP_Next, iSrc, addr1); + autoIncEnd(pParse, iDbDest, pDest, counterMem); + for(pDestIdx=pDest->pIndex; pDestIdx; pDestIdx=pDestIdx->pNext){ + for(pSrcIdx=pSrc->pIndex; pSrcIdx; pSrcIdx=pSrcIdx->pNext){ + if( xferCompatibleIndex(pDestIdx, pSrcIdx) ) break; + } + assert( pSrcIdx ); + sqlite3VdbeAddOp(v, OP_Close, iSrc, 0); + sqlite3VdbeAddOp(v, OP_Close, iDest, 0); + sqlite3VdbeAddOp(v, OP_Integer, iDbSrc, 0); + pKey = sqlite3IndexKeyinfo(pParse, pSrcIdx); + VdbeComment((v, "# %s", pSrcIdx->zName)); + sqlite3VdbeOp3(v, OP_OpenRead, iSrc, pSrcIdx->tnum, + (char*)pKey, P3_KEYINFO_HANDOFF); + sqlite3VdbeAddOp(v, OP_Integer, iDbDest, 0); + pKey = sqlite3IndexKeyinfo(pParse, pDestIdx); + VdbeComment((v, "# %s", pDestIdx->zName)); + sqlite3VdbeOp3(v, OP_OpenWrite, iDest, pDestIdx->tnum, + (char*)pKey, P3_KEYINFO_HANDOFF); + addr1 = sqlite3VdbeAddOp(v, OP_Rewind, iSrc, 0); + sqlite3VdbeAddOp(v, OP_RowKey, iSrc, 0); + sqlite3VdbeAddOp(v, OP_IdxInsert, iDest, 1); + sqlite3VdbeAddOp(v, OP_Next, iSrc, addr1+1); + sqlite3VdbeJumpHere(v, addr1); + } + sqlite3VdbeJumpHere(v, emptySrcTest); + sqlite3VdbeAddOp(v, OP_Close, iSrc, 0); + sqlite3VdbeAddOp(v, OP_Close, iDest, 0); + if( emptyDestTest ){ + sqlite3VdbeAddOp(v, OP_Halt, SQLITE_OK, 0); + sqlite3VdbeJumpHere(v, emptyDestTest); + sqlite3VdbeAddOp(v, OP_Close, iDest, 0); + return 0; + }else{ + return 1; + } +} +#endif /* SQLITE_OMIT_XFER_OPT */ diff --git 
a/libraries/sqlite/unix/sqlite-3.5.1/src/journal.c b/libraries/sqlite/unix/sqlite-3.5.1/src/journal.c new file mode 100644 index 0000000000..7cbe5bc9ff --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/journal.c @@ -0,0 +1,238 @@ +/* +** 2007 August 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** @(#) $Id: journal.c,v 1.7 2007/09/06 13:49:37 drh Exp $ +*/ + +#ifdef SQLITE_ENABLE_ATOMIC_WRITE + +/* +** This file implements a special kind of sqlite3_file object used +** by SQLite to create journal files if the atomic-write optimization +** is enabled. +** +** The distinctive characteristic of this sqlite3_file is that the +** actual on disk file is created lazily. When the file is created, +** the caller specifies a buffer size for an in-memory buffer to +** be used to service read() and write() requests. The actual file +** on disk is not created or populated until either: +** +** 1) The in-memory representation grows too large for the allocated +** buffer, or +** 2) The xSync() method is called. +*/ + +#include "sqliteInt.h" + + +/* +** A JournalFile object is a subclass of sqlite3_file used by +** as an open file handle for journal files. +*/ +struct JournalFile { + sqlite3_io_methods *pMethod; /* I/O methods on journal files */ + int nBuf; /* Size of zBuf[] in bytes */ + char *zBuf; /* Space to buffer journal writes */ + int iSize; /* Amount of zBuf[] currently used */ + int flags; /* xOpen flags */ + sqlite3_vfs *pVfs; /* The "real" underlying VFS */ + sqlite3_file *pReal; /* The "real" underlying file descriptor */ + const char *zJournal; /* Name of the journal file */ +}; +typedef struct JournalFile JournalFile; + +/* +** If it does not already exists, create and populate the on-disk file +** for JournalFile p. +*/ +static int createFile(JournalFile *p){ + int rc = SQLITE_OK; + if( !p->pReal ){ + sqlite3_file *pReal = (sqlite3_file *)&p[1]; + rc = sqlite3OsOpen(p->pVfs, p->zJournal, pReal, p->flags, 0); + if( rc==SQLITE_OK ){ + p->pReal = pReal; + if( p->iSize>0 ){ + assert(p->iSize<=p->nBuf); + rc = sqlite3OsWrite(p->pReal, p->zBuf, p->iSize, 0); + } + } + } + return rc; +} + +/* +** Close the file. +*/ +static int jrnlClose(sqlite3_file *pJfd){ + JournalFile *p = (JournalFile *)pJfd; + if( p->pReal ){ + sqlite3OsClose(p->pReal); + } + sqlite3_free(p->zBuf); + return SQLITE_OK; +} + +/* +** Read data from the file. +*/ +static int jrnlRead( + sqlite3_file *pJfd, /* The journal file from which to read */ + void *zBuf, /* Put the results here */ + int iAmt, /* Number of bytes to read */ + sqlite_int64 iOfst /* Begin reading at this offset */ +){ + int rc = SQLITE_OK; + JournalFile *p = (JournalFile *)pJfd; + if( p->pReal ){ + rc = sqlite3OsRead(p->pReal, zBuf, iAmt, iOfst); + }else{ + assert( iAmt+iOfst<=p->iSize ); + memcpy(zBuf, &p->zBuf[iOfst], iAmt); + } + return rc; +} + +/* +** Write data to the file. 
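+**
+** If the real journal file has not yet been created and the write still
+** fits inside the nBuf-byte buffer, the data is simply copied into zBuf[];
+** once a write would extend past that buffer, createFile() is called first
+** so the buffered contents are flushed to disk before the new data is
+** written through to the real file.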
+*/ +static int jrnlWrite( + sqlite3_file *pJfd, /* The journal file into which to write */ + const void *zBuf, /* Take data to be written from here */ + int iAmt, /* Number of bytes to write */ + sqlite_int64 iOfst /* Begin writing at this offset into the file */ +){ + int rc = SQLITE_OK; + JournalFile *p = (JournalFile *)pJfd; + if( !p->pReal && (iOfst+iAmt)>p->nBuf ){ + rc = createFile(p); + } + if( rc==SQLITE_OK ){ + if( p->pReal ){ + rc = sqlite3OsWrite(p->pReal, zBuf, iAmt, iOfst); + }else{ + memcpy(&p->zBuf[iOfst], zBuf, iAmt); + if( p->iSize<(iOfst+iAmt) ){ + p->iSize = (iOfst+iAmt); + } + } + } + return rc; +} + +/* +** Truncate the file. +*/ +static int jrnlTruncate(sqlite3_file *pJfd, sqlite_int64 size){ + int rc = SQLITE_OK; + JournalFile *p = (JournalFile *)pJfd; + if( p->pReal ){ + rc = sqlite3OsTruncate(p->pReal, size); + }else if( sizeiSize ){ + p->iSize = size; + } + return rc; +} + +/* +** Sync the file. +*/ +static int jrnlSync(sqlite3_file *pJfd, int flags){ + int rc; + JournalFile *p = (JournalFile *)pJfd; + rc = createFile(p); + if( rc==SQLITE_OK ){ + rc = sqlite3OsSync(p->pReal, flags); + } + return rc; +} + +/* +** Query the size of the file in bytes. +*/ +static int jrnlFileSize(sqlite3_file *pJfd, sqlite_int64 *pSize){ + int rc = SQLITE_OK; + JournalFile *p = (JournalFile *)pJfd; + if( p->pReal ){ + rc = sqlite3OsFileSize(p->pReal, pSize); + }else{ + *pSize = (sqlite_int64) p->iSize; + } + return rc; +} + +/* +** Table of methods for JournalFile sqlite3_file object. +*/ +static struct sqlite3_io_methods JournalFileMethods = { + 1, /* iVersion */ + jrnlClose, /* xClose */ + jrnlRead, /* xRead */ + jrnlWrite, /* xWrite */ + jrnlTruncate, /* xTruncate */ + jrnlSync, /* xSync */ + jrnlFileSize, /* xFileSize */ + 0, /* xLock */ + 0, /* xUnlock */ + 0, /* xCheckReservedLock */ + 0, /* xFileControl */ + 0, /* xSectorSize */ + 0 /* xDeviceCharacteristics */ +}; + +/* +** Open a journal file. +*/ +int sqlite3JournalOpen( + sqlite3_vfs *pVfs, /* The VFS to use for actual file I/O */ + const char *zName, /* Name of the journal file */ + sqlite3_file *pJfd, /* Preallocated, blank file handle */ + int flags, /* Opening flags */ + int nBuf /* Bytes buffered before opening the file */ +){ + JournalFile *p = (JournalFile *)pJfd; + memset(p, 0, sqlite3JournalSize(pVfs)); + if( nBuf>0 ){ + p->zBuf = sqlite3MallocZero(nBuf); + if( !p->zBuf ){ + return SQLITE_NOMEM; + } + }else{ + return sqlite3OsOpen(pVfs, zName, pJfd, flags, 0); + } + p->pMethod = &JournalFileMethods; + p->nBuf = nBuf; + p->flags = flags; + p->zJournal = zName; + p->pVfs = pVfs; + return SQLITE_OK; +} + +/* +** If the argument p points to a JournalFile structure, and the underlying +** file has not yet been created, create it now. +*/ +int sqlite3JournalCreate(sqlite3_file *p){ + if( p->pMethods!=&JournalFileMethods ){ + return SQLITE_OK; + } + return createFile((JournalFile *)p); +} + +/* +** Return the number of bytes required to store a JournalFile that uses vfs +** pVfs to create the underlying on-disk files. +*/ +int sqlite3JournalSize(sqlite3_vfs *pVfs){ + return (pVfs->szOsFile+sizeof(JournalFile)); +} +#endif diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/legacy.c b/libraries/sqlite/unix/sqlite-3.5.1/src/legacy.c new file mode 100644 index 0000000000..c004b89e1d --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/legacy.c @@ -0,0 +1,134 @@ +/* +** 2001 September 15 +** +** The author disclaims copyright to this source code. 
In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** Main file for the SQLite library. The routines in this file +** implement the programmer interface to the library. Routines in +** other files are for internal use by SQLite and should not be +** accessed by users of the library. +** +** $Id: legacy.c,v 1.22 2007/08/29 12:31:26 danielk1977 Exp $ +*/ + +#include "sqliteInt.h" +#include + +/* +** Execute SQL code. Return one of the SQLITE_ success/failure +** codes. Also write an error message into memory obtained from +** malloc() and make *pzErrMsg point to that message. +** +** If the SQL is a query, then for each row in the query result +** the xCallback() function is called. pArg becomes the first +** argument to xCallback(). If xCallback=NULL then no callback +** is invoked, even for queries. +*/ +int sqlite3_exec( + sqlite3 *db, /* The database on which the SQL executes */ + const char *zSql, /* The SQL to be executed */ + sqlite3_callback xCallback, /* Invoke this callback routine */ + void *pArg, /* First argument to xCallback() */ + char **pzErrMsg /* Write error messages here */ +){ + int rc = SQLITE_OK; + const char *zLeftover; + sqlite3_stmt *pStmt = 0; + char **azCols = 0; + + int nRetry = 0; + int nCallback; + + if( zSql==0 ) return SQLITE_OK; + + sqlite3_mutex_enter(db->mutex); + while( (rc==SQLITE_OK || (rc==SQLITE_SCHEMA && (++nRetry)<2)) && zSql[0] ){ + int nCol; + char **azVals = 0; + + pStmt = 0; + rc = sqlite3_prepare(db, zSql, -1, &pStmt, &zLeftover); + assert( rc==SQLITE_OK || pStmt==0 ); + if( rc!=SQLITE_OK ){ + continue; + } + if( !pStmt ){ + /* this happens for a comment or white-space */ + zSql = zLeftover; + continue; + } + + nCallback = 0; + + nCol = sqlite3_column_count(pStmt); + azCols = sqlite3DbMallocZero(db, 2*nCol*sizeof(const char *) + 1); + if( azCols==0 ){ + goto exec_out; + } + + while( 1 ){ + int i; + rc = sqlite3_step(pStmt); + + /* Invoke the callback function if required */ + if( xCallback && (SQLITE_ROW==rc || + (SQLITE_DONE==rc && !nCallback && db->flags&SQLITE_NullCallback)) ){ + if( 0==nCallback ){ + for(i=0; ierrMask)==rc ); + sqlite3_mutex_leave(db->mutex); + return rc; +} diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/loadext.c b/libraries/sqlite/unix/sqlite-3.5.1/src/loadext.c new file mode 100644 index 0000000000..dc6056299b --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/loadext.c @@ -0,0 +1,516 @@ +/* +** 2006 June 7 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains code used to dynamically load extensions into +** the SQLite library. +*/ +#ifndef SQLITE_OMIT_LOAD_EXTENSION + +#define SQLITE_CORE 1 /* Disable the API redefinition in sqlite3ext.h */ +#include "sqlite3ext.h" +#include "sqliteInt.h" +#include +#include + +/* +** Some API routines are omitted when various features are +** excluded from a build of SQLite. Substitute a NULL pointer +** for any missing APIs. 
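+**
+** For example, in a build compiled with SQLITE_OMIT_UTF16 the entry for
+** sqlite3_bind_text16 in the sqlite3_apis table below is a NULL pointer,
+** so a loadable extension must test such pointers before calling any API
+** that can be compiled out.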
+*/ +#ifndef SQLITE_ENABLE_COLUMN_METADATA +# define sqlite3_column_database_name 0 +# define sqlite3_column_database_name16 0 +# define sqlite3_column_table_name 0 +# define sqlite3_column_table_name16 0 +# define sqlite3_column_origin_name 0 +# define sqlite3_column_origin_name16 0 +# define sqlite3_table_column_metadata 0 +#endif + +#ifdef SQLITE_OMIT_AUTHORIZATION +# define sqlite3_set_authorizer 0 +#endif + +#ifdef SQLITE_OMIT_UTF16 +# define sqlite3_bind_text16 0 +# define sqlite3_collation_needed16 0 +# define sqlite3_column_decltype16 0 +# define sqlite3_column_name16 0 +# define sqlite3_column_text16 0 +# define sqlite3_complete16 0 +# define sqlite3_create_collation16 0 +# define sqlite3_create_function16 0 +# define sqlite3_errmsg16 0 +# define sqlite3_open16 0 +# define sqlite3_prepare16 0 +# define sqlite3_prepare16_v2 0 +# define sqlite3_result_error16 0 +# define sqlite3_result_text16 0 +# define sqlite3_result_text16be 0 +# define sqlite3_result_text16le 0 +# define sqlite3_value_text16 0 +# define sqlite3_value_text16be 0 +# define sqlite3_value_text16le 0 +# define sqlite3_column_database_name16 0 +# define sqlite3_column_table_name16 0 +# define sqlite3_column_origin_name16 0 +#endif + +#ifdef SQLITE_OMIT_COMPLETE +# define sqlite3_complete 0 +# define sqlite3_complete16 0 +#endif + +#ifdef SQLITE_OMIT_PROGRESS_CALLBACK +# define sqlite3_progress_handler 0 +#endif + +#ifdef SQLITE_OMIT_VIRTUALTABLE +# define sqlite3_create_module 0 +# define sqlite3_create_module_v2 0 +# define sqlite3_declare_vtab 0 +#endif + +#ifdef SQLITE_OMIT_SHARED_CACHE +# define sqlite3_enable_shared_cache 0 +#endif + +#ifdef SQLITE_OMIT_TRACE +# define sqlite3_profile 0 +# define sqlite3_trace 0 +#endif + +#ifdef SQLITE_OMIT_GET_TABLE +# define sqlite3_free_table 0 +# define sqlite3_get_table 0 +#endif + +#ifdef SQLITE_OMIT_INCRBLOB +#define sqlite3_bind_zeroblob 0 +#define sqlite3_blob_bytes 0 +#define sqlite3_blob_close 0 +#define sqlite3_blob_open 0 +#define sqlite3_blob_read 0 +#define sqlite3_blob_write 0 +#endif + +/* +** The following structure contains pointers to all SQLite API routines. +** A pointer to this structure is passed into extensions when they are +** loaded so that the extension can make calls back into the SQLite +** library. +** +** When adding new APIs, add them to the bottom of this structure +** in order to preserve backwards compatibility. +** +** Extensions that use newer APIs should first call the +** sqlite3_libversion_number() to make sure that the API they +** intend to use is supported by the library. Extensions should +** also check to make sure that the pointer to the function is +** not NULL before calling it. 
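+**
+** A minimal sketch of that check inside a hypothetical extension (the
+** version number and API shown are illustrative):
+**
+**     if( sqlite3_libversion_number()>=3005000 && sqlite3_blob_open!=0 ){
+**       ... safe to use the incremental-blob API added in 3.5.0 ...
+**     }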
+*/ +const sqlite3_api_routines sqlite3_apis = { + sqlite3_aggregate_context, + sqlite3_aggregate_count, + sqlite3_bind_blob, + sqlite3_bind_double, + sqlite3_bind_int, + sqlite3_bind_int64, + sqlite3_bind_null, + sqlite3_bind_parameter_count, + sqlite3_bind_parameter_index, + sqlite3_bind_parameter_name, + sqlite3_bind_text, + sqlite3_bind_text16, + sqlite3_bind_value, + sqlite3_busy_handler, + sqlite3_busy_timeout, + sqlite3_changes, + sqlite3_close, + sqlite3_collation_needed, + sqlite3_collation_needed16, + sqlite3_column_blob, + sqlite3_column_bytes, + sqlite3_column_bytes16, + sqlite3_column_count, + sqlite3_column_database_name, + sqlite3_column_database_name16, + sqlite3_column_decltype, + sqlite3_column_decltype16, + sqlite3_column_double, + sqlite3_column_int, + sqlite3_column_int64, + sqlite3_column_name, + sqlite3_column_name16, + sqlite3_column_origin_name, + sqlite3_column_origin_name16, + sqlite3_column_table_name, + sqlite3_column_table_name16, + sqlite3_column_text, + sqlite3_column_text16, + sqlite3_column_type, + sqlite3_column_value, + sqlite3_commit_hook, + sqlite3_complete, + sqlite3_complete16, + sqlite3_create_collation, + sqlite3_create_collation16, + sqlite3_create_function, + sqlite3_create_function16, + sqlite3_create_module, + sqlite3_data_count, + sqlite3_db_handle, + sqlite3_declare_vtab, + sqlite3_enable_shared_cache, + sqlite3_errcode, + sqlite3_errmsg, + sqlite3_errmsg16, + sqlite3_exec, + sqlite3_expired, + sqlite3_finalize, + sqlite3_free, + sqlite3_free_table, + sqlite3_get_autocommit, + sqlite3_get_auxdata, + sqlite3_get_table, + 0, /* Was sqlite3_global_recover(), but that function is deprecated */ + sqlite3_interrupt, + sqlite3_last_insert_rowid, + sqlite3_libversion, + sqlite3_libversion_number, + sqlite3_malloc, + sqlite3_mprintf, + sqlite3_open, + sqlite3_open16, + sqlite3_prepare, + sqlite3_prepare16, + sqlite3_profile, + sqlite3_progress_handler, + sqlite3_realloc, + sqlite3_reset, + sqlite3_result_blob, + sqlite3_result_double, + sqlite3_result_error, + sqlite3_result_error16, + sqlite3_result_int, + sqlite3_result_int64, + sqlite3_result_null, + sqlite3_result_text, + sqlite3_result_text16, + sqlite3_result_text16be, + sqlite3_result_text16le, + sqlite3_result_value, + sqlite3_rollback_hook, + sqlite3_set_authorizer, + sqlite3_set_auxdata, + sqlite3_snprintf, + sqlite3_step, + sqlite3_table_column_metadata, + sqlite3_thread_cleanup, + sqlite3_total_changes, + sqlite3_trace, + sqlite3_transfer_bindings, + sqlite3_update_hook, + sqlite3_user_data, + sqlite3_value_blob, + sqlite3_value_bytes, + sqlite3_value_bytes16, + sqlite3_value_double, + sqlite3_value_int, + sqlite3_value_int64, + sqlite3_value_numeric_type, + sqlite3_value_text, + sqlite3_value_text16, + sqlite3_value_text16be, + sqlite3_value_text16le, + sqlite3_value_type, + sqlite3_vmprintf, + /* + ** The original API set ends here. All extensions can call any + ** of the APIs above provided that the pointer is not NULL. But + ** before calling APIs that follow, extension should check the + ** sqlite3_libversion_number() to make sure they are dealing with + ** a library that is new enough to support that API. 
+ ************************************************************************* + */ + sqlite3_overload_function, + + /* + ** Added after 3.3.13 + */ + sqlite3_prepare_v2, + sqlite3_prepare16_v2, + sqlite3_clear_bindings, + + /* + ** Added for 3.4.1 + */ + sqlite3_create_module_v2, + + /* + ** Added for 3.5.0 + */ + sqlite3_bind_zeroblob, + sqlite3_blob_bytes, + sqlite3_blob_close, + sqlite3_blob_open, + sqlite3_blob_read, + sqlite3_blob_write, + sqlite3_create_collation_v2, + sqlite3_file_control, + sqlite3_memory_highwater, + sqlite3_memory_used, +#ifdef SQLITE_MUTEX_NOOP + 0, + 0, + 0, + 0, + 0, +#else + sqlite3_mutex_alloc, + sqlite3_mutex_enter, + sqlite3_mutex_free, + sqlite3_mutex_leave, + sqlite3_mutex_try, +#endif + sqlite3_open_v2, + sqlite3_release_memory, + sqlite3_result_error_nomem, + sqlite3_result_error_toobig, + sqlite3_sleep, + sqlite3_soft_heap_limit, + sqlite3_vfs_find, + sqlite3_vfs_register, + sqlite3_vfs_unregister, +}; + +/* +** Attempt to load an SQLite extension library contained in the file +** zFile. The entry point is zProc. zProc may be 0 in which case a +** default entry point name (sqlite3_extension_init) is used. Use +** of the default name is recommended. +** +** Return SQLITE_OK on success and SQLITE_ERROR if something goes wrong. +** +** If an error occurs and pzErrMsg is not 0, then fill *pzErrMsg with +** error message text. The calling function should free this memory +** by calling sqlite3_free(). +*/ +static int sqlite3LoadExtension( + sqlite3 *db, /* Load the extension into this database connection */ + const char *zFile, /* Name of the shared library containing extension */ + const char *zProc, /* Entry point. Use "sqlite3_extension_init" if 0 */ + char **pzErrMsg /* Put error message here if not 0 */ +){ + sqlite3_vfs *pVfs = db->pVfs; + void *handle; + int (*xInit)(sqlite3*,char**,const sqlite3_api_routines*); + char *zErrmsg = 0; + void **aHandle; + + /* Ticket #1863. To avoid a creating security problems for older + ** applications that relink against newer versions of SQLite, the + ** ability to run load_extension is turned off by default. One + ** must call sqlite3_enable_load_extension() to turn on extension + ** loading. Otherwise you get the following error. + */ + if( (db->flags & SQLITE_LoadExtension)==0 ){ + if( pzErrMsg ){ + *pzErrMsg = sqlite3_mprintf("not authorized"); + } + return SQLITE_ERROR; + } + + if( zProc==0 ){ + zProc = "sqlite3_extension_init"; + } + + handle = sqlite3OsDlOpen(pVfs, zFile); + if( handle==0 ){ + if( pzErrMsg ){ + char zErr[256]; + zErr[sizeof(zErr)-1] = '\0'; + sqlite3_snprintf(sizeof(zErr)-1, zErr, + "unable to open shared library [%s]", zFile); + sqlite3OsDlError(pVfs, sizeof(zErr)-1, zErr); + *pzErrMsg = sqlite3DbStrDup(db, zErr); + } + return SQLITE_ERROR; + } + xInit = (int(*)(sqlite3*,char**,const sqlite3_api_routines*)) + sqlite3OsDlSym(pVfs, handle, zProc); + if( xInit==0 ){ + if( pzErrMsg ){ + char zErr[256]; + zErr[sizeof(zErr)-1] = '\0'; + sqlite3_snprintf(sizeof(zErr)-1, zErr, + "no entry point [%s] in shared library [%s]", zProc,zFile); + sqlite3OsDlError(pVfs, sizeof(zErr)-1, zErr); + *pzErrMsg = sqlite3DbStrDup(db, zErr); + sqlite3OsDlClose(pVfs, handle); + } + return SQLITE_ERROR; + }else if( xInit(db, &zErrmsg, &sqlite3_apis) ){ + if( pzErrMsg ){ + *pzErrMsg = sqlite3_mprintf("error during initialization: %s", zErrmsg); + } + sqlite3_free(zErrmsg); + sqlite3OsDlClose(pVfs, handle); + return SQLITE_ERROR; + } + + /* Append the new shared library handle to the db->aExtension array. 
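+  ** The array grows by one slot for every successful load; the handles
+  ** collected here are closed again by sqlite3CloseExtensions() when the
+  ** database connection is closed.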
*/ + db->nExtension++; + aHandle = sqlite3DbMallocZero(db, sizeof(handle)*db->nExtension); + if( aHandle==0 ){ + return SQLITE_NOMEM; + } + if( db->nExtension>0 ){ + memcpy(aHandle, db->aExtension, sizeof(handle)*(db->nExtension-1)); + } + sqlite3_free(db->aExtension); + db->aExtension = aHandle; + + db->aExtension[db->nExtension-1] = handle; + return SQLITE_OK; +} +int sqlite3_load_extension( + sqlite3 *db, /* Load the extension into this database connection */ + const char *zFile, /* Name of the shared library containing extension */ + const char *zProc, /* Entry point. Use "sqlite3_extension_init" if 0 */ + char **pzErrMsg /* Put error message here if not 0 */ +){ + int rc; + sqlite3_mutex_enter(db->mutex); + rc = sqlite3LoadExtension(db, zFile, zProc, pzErrMsg); + sqlite3_mutex_leave(db->mutex); + return rc; +} + +/* +** Call this routine when the database connection is closing in order +** to clean up loaded extensions +*/ +void sqlite3CloseExtensions(sqlite3 *db){ + int i; + assert( sqlite3_mutex_held(db->mutex) ); + for(i=0; inExtension; i++){ + sqlite3OsDlClose(db->pVfs, db->aExtension[i]); + } + sqlite3_free(db->aExtension); +} + +/* +** Enable or disable extension loading. Extension loading is disabled by +** default so as not to open security holes in older applications. +*/ +int sqlite3_enable_load_extension(sqlite3 *db, int onoff){ + sqlite3_mutex_enter(db->mutex); + if( onoff ){ + db->flags |= SQLITE_LoadExtension; + }else{ + db->flags &= ~SQLITE_LoadExtension; + } + sqlite3_mutex_leave(db->mutex); + return SQLITE_OK; +} + +/* +** The following object holds the list of automatically loaded +** extensions. +** +** This list is shared across threads. The SQLITE_MUTEX_STATIC_MASTER +** mutex must be held while accessing this list. +*/ +static struct { + int nExt; /* Number of entries in aExt[] */ + void **aExt; /* Pointers to the extension init functions */ +} autoext = { 0, 0 }; + + +/* +** Register a statically linked extension that is automatically +** loaded by every new database connection. +*/ +int sqlite3_auto_extension(void *xInit){ + int i; + int rc = SQLITE_OK; + sqlite3_mutex *mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER); + sqlite3_mutex_enter(mutex); + for(i=0; i=autoext.nExt ){ + xInit = 0; + go = 0; + }else{ + xInit = (int(*)(sqlite3*,char**,const sqlite3_api_routines*)) + autoext.aExt[i]; + } + sqlite3_mutex_leave(mutex); + if( xInit && xInit(db, &zErrmsg, &sqlite3_apis) ){ + sqlite3Error(db, SQLITE_ERROR, + "automatic extension loading failed: %s", zErrmsg); + go = 0; + rc = SQLITE_ERROR; + sqlite3_free(zErrmsg); + } + } + return rc; +} + +#endif /* SQLITE_OMIT_LOAD_EXTENSION */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/main.c b/libraries/sqlite/unix/sqlite-3.5.1/src/main.c new file mode 100644 index 0000000000..f61fe80b25 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/main.c @@ -0,0 +1,1485 @@ +/* +** 2001 September 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** Main file for the SQLite library. The routines in this file +** implement the programmer interface to the library. Routines in +** other files are for internal use by SQLite and should not be +** accessed by users of the library. 
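+**
+** As a purely illustrative sketch (not part of this file; the filename is
+** arbitrary), typical use of that programmer interface looks like:
+**
+**     sqlite3 *db;
+**     if( sqlite3_open("test.db", &db)==SQLITE_OK ){
+**       sqlite3_exec(db, "CREATE TABLE IF NOT EXISTS t(x)", 0, 0, 0);
+**     }
+**     sqlite3_close(db);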
+** +** $Id: main.c,v 1.406 2007/10/03 21:10:58 drh Exp $ +*/ +#include "sqliteInt.h" +#include + +/* +** The version of the library +*/ +const char sqlite3_version[] = SQLITE_VERSION; +const char *sqlite3_libversion(void){ return sqlite3_version; } +int sqlite3_libversion_number(void){ return SQLITE_VERSION_NUMBER; } +int sqlite3_threadsafe(void){ return SQLITE_THREADSAFE; } + +/* +** If the following function pointer is not NULL and if +** SQLITE_ENABLE_IOTRACE is enabled, then messages describing +** I/O active are written using this function. These messages +** are intended for debugging activity only. +*/ +void (*sqlite3_io_trace)(const char*, ...) = 0; + +/* +** If the following global variable points to a string which is the +** name of a directory, then that directory will be used to store +** temporary files. +** +** See also the "PRAGMA temp_store_directory" SQL command. +*/ +char *sqlite3_temp_directory = 0; + + +/* +** This is the default collating function named "BINARY" which is always +** available. +*/ +static int binCollFunc( + void *NotUsed, + int nKey1, const void *pKey1, + int nKey2, const void *pKey2 +){ + int rc, n; + n = nKey1lastRowid; +} + +/* +** Return the number of changes in the most recent call to sqlite3_exec(). +*/ +int sqlite3_changes(sqlite3 *db){ + return db->nChange; +} + +/* +** Return the number of changes since the database handle was opened. +*/ +int sqlite3_total_changes(sqlite3 *db){ + return db->nTotalChange; +} + +/* +** Close an existing SQLite database +*/ +int sqlite3_close(sqlite3 *db){ + HashElem *i; + int j; + + if( !db ){ + return SQLITE_OK; + } + if( sqlite3SafetyCheck(db) ){ + return SQLITE_MISUSE; + } + sqlite3_mutex_enter(db->mutex); + +#ifdef SQLITE_SSE + { + extern void sqlite3SseCleanup(sqlite3*); + sqlite3SseCleanup(db); + } +#endif + + sqlite3ResetInternalSchema(db, 0); + + /* If a transaction is open, the ResetInternalSchema() call above + ** will not have called the xDisconnect() method on any virtual + ** tables in the db->aVTrans[] array. The following sqlite3VtabRollback() + ** call will do so. We need to do this before the check for active + ** SQL statements below, as the v-table implementation may be storing + ** some prepared statements internally. + */ + sqlite3VtabRollback(db); + + /* If there are any outstanding VMs, return SQLITE_BUSY. */ + if( db->pVdbe ){ + sqlite3Error(db, SQLITE_BUSY, + "Unable to close due to unfinalised statements"); + sqlite3_mutex_leave(db->mutex); + return SQLITE_BUSY; + } + assert( !sqlite3SafetyCheck(db) ); + + /* FIX ME: db->magic may be set to SQLITE_MAGIC_CLOSED if the database + ** cannot be opened for some reason. So this routine needs to run in + ** that case. But maybe there should be an extra magic value for the + ** "failed to open" state. + ** + ** TODO: Coverage tests do not test the case where this condition is + ** true. It's hard to see how to cause it without messing with threads. 
+ */ + if( db->magic!=SQLITE_MAGIC_CLOSED && sqlite3SafetyOn(db) ){ + /* printf("DID NOT CLOSE\n"); fflush(stdout); */ + sqlite3_mutex_leave(db->mutex); + return SQLITE_ERROR; + } + + for(j=0; jnDb; j++){ + struct Db *pDb = &db->aDb[j]; + if( pDb->pBt ){ + sqlite3BtreeClose(pDb->pBt); + pDb->pBt = 0; + if( j!=1 ){ + pDb->pSchema = 0; + } + } + } + sqlite3ResetInternalSchema(db, 0); + assert( db->nDb<=2 ); + assert( db->aDb==db->aDbStatic ); + for(i=sqliteHashFirst(&db->aFunc); i; i=sqliteHashNext(i)){ + FuncDef *pFunc, *pNext; + for(pFunc = (FuncDef*)sqliteHashData(i); pFunc; pFunc=pNext){ + pNext = pFunc->pNext; + sqlite3_free(pFunc); + } + } + + for(i=sqliteHashFirst(&db->aCollSeq); i; i=sqliteHashNext(i)){ + CollSeq *pColl = (CollSeq *)sqliteHashData(i); + /* Invoke any destructors registered for collation sequence user data. */ + for(j=0; j<3; j++){ + if( pColl[j].xDel ){ + pColl[j].xDel(pColl[j].pUser); + } + } + sqlite3_free(pColl); + } + sqlite3HashClear(&db->aCollSeq); +#ifndef SQLITE_OMIT_VIRTUALTABLE + for(i=sqliteHashFirst(&db->aModule); i; i=sqliteHashNext(i)){ + Module *pMod = (Module *)sqliteHashData(i); + if( pMod->xDestroy ){ + pMod->xDestroy(pMod->pAux); + } + sqlite3_free(pMod); + } + sqlite3HashClear(&db->aModule); +#endif + + sqlite3HashClear(&db->aFunc); + sqlite3Error(db, SQLITE_OK, 0); /* Deallocates any cached error strings. */ + if( db->pErr ){ + sqlite3ValueFree(db->pErr); + } + sqlite3CloseExtensions(db); + + db->magic = SQLITE_MAGIC_ERROR; + + /* The temp-database schema is allocated differently from the other schema + ** objects (using sqliteMalloc() directly, instead of sqlite3BtreeSchema()). + ** So it needs to be freed here. Todo: Why not roll the temp schema into + ** the same sqliteMalloc() as the one that allocates the database + ** structure? + */ + sqlite3_free(db->aDb[1].pSchema); + sqlite3_mutex_leave(db->mutex); + sqlite3_mutex_free(db->mutex); + sqlite3_free(db); + return SQLITE_OK; +} + +/* +** Rollback all database files. +*/ +void sqlite3RollbackAll(sqlite3 *db){ + int i; + int inTrans = 0; + assert( sqlite3_mutex_held(db->mutex) ); + sqlite3MallocEnterBenignBlock(1); /* Enter benign region */ + for(i=0; inDb; i++){ + if( db->aDb[i].pBt ){ + if( sqlite3BtreeIsInTrans(db->aDb[i].pBt) ){ + inTrans = 1; + } + sqlite3BtreeRollback(db->aDb[i].pBt); + db->aDb[i].inTrans = 0; + } + } + sqlite3VtabRollback(db); + sqlite3MallocLeaveBenignBlock(); /* Leave benign region */ + + if( db->flags&SQLITE_InternChanges ){ + sqlite3ExpirePreparedStatements(db); + sqlite3ResetInternalSchema(db, 0); + } + + /* If one has been configured, invoke the rollback-hook callback */ + if( db->xRollbackCallback && (inTrans || !db->autoCommit) ){ + db->xRollbackCallback(db->pRollbackArg); + } +} + +/* +** Return a static string that describes the kind of error specified in the +** argument. 
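+** For example, sqlite3ErrStr(SQLITE_BUSY) yields "database is locked"; this
+** is also the text that sqlite3_errmsg() falls back to when no more specific
+** message has been stored on the connection.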
+*/ +const char *sqlite3ErrStr(int rc){ + const char *z; + switch( rc & 0xff ){ + case SQLITE_ROW: + case SQLITE_DONE: + case SQLITE_OK: z = "not an error"; break; + case SQLITE_ERROR: z = "SQL logic error or missing database"; break; + case SQLITE_PERM: z = "access permission denied"; break; + case SQLITE_ABORT: z = "callback requested query abort"; break; + case SQLITE_BUSY: z = "database is locked"; break; + case SQLITE_LOCKED: z = "database table is locked"; break; + case SQLITE_NOMEM: z = "out of memory"; break; + case SQLITE_READONLY: z = "attempt to write a readonly database"; break; + case SQLITE_INTERRUPT: z = "interrupted"; break; + case SQLITE_IOERR: z = "disk I/O error"; break; + case SQLITE_CORRUPT: z = "database disk image is malformed"; break; + case SQLITE_FULL: z = "database or disk is full"; break; + case SQLITE_CANTOPEN: z = "unable to open database file"; break; + case SQLITE_EMPTY: z = "table contains no data"; break; + case SQLITE_SCHEMA: z = "database schema has changed"; break; + case SQLITE_TOOBIG: z = "String or BLOB exceeded size limit"; break; + case SQLITE_CONSTRAINT: z = "constraint failed"; break; + case SQLITE_MISMATCH: z = "datatype mismatch"; break; + case SQLITE_MISUSE: z = "library routine called out of sequence";break; + case SQLITE_NOLFS: z = "kernel lacks large file support"; break; + case SQLITE_AUTH: z = "authorization denied"; break; + case SQLITE_FORMAT: z = "auxiliary database format error"; break; + case SQLITE_RANGE: z = "bind or column index out of range"; break; + case SQLITE_NOTADB: z = "file is encrypted or is not a database";break; + default: z = "unknown error"; break; + } + return z; +} + +/* +** This routine implements a busy callback that sleeps and tries +** again until a timeout value is reached. The timeout value is +** an integer number of milliseconds passed in as the first +** argument. +*/ +static int sqliteDefaultBusyCallback( + void *ptr, /* Database connection */ + int count /* Number of times table has been busy */ +){ +#if OS_WIN || (defined(HAVE_USLEEP) && HAVE_USLEEP) + static const u8 delays[] = + { 1, 2, 5, 10, 15, 20, 25, 25, 25, 50, 50, 100 }; + static const u8 totals[] = + { 0, 1, 3, 8, 18, 33, 53, 78, 103, 128, 178, 228 }; +# define NDELAY (sizeof(delays)/sizeof(delays[0])) + sqlite3 *db = (sqlite3 *)ptr; + int timeout = db->busyTimeout; + int delay, prior; + + assert( count>=0 ); + if( count < NDELAY ){ + delay = delays[count]; + prior = totals[count]; + }else{ + delay = delays[NDELAY-1]; + prior = totals[NDELAY-1] + delay*(count-(NDELAY-1)); + } + if( prior + delay > timeout ){ + delay = timeout - prior; + if( delay<=0 ) return 0; + } + sqlite3OsSleep(db->pVfs, delay*1000); + return 1; +#else + sqlite3 *db = (sqlite3 *)ptr; + int timeout = ((sqlite3 *)ptr)->busyTimeout; + if( (count+1)*1000 > timeout ){ + return 0; + } + sqlite3OsSleep(db->pVfs, 1000000); + return 1; +#endif +} + +/* +** Invoke the given busy handler. +** +** This routine is called when an operation failed with a lock. +** If this routine returns non-zero, the lock is retried. If it +** returns 0, the operation aborts with an SQLITE_BUSY error. +*/ +int sqlite3InvokeBusyHandler(BusyHandler *p){ + int rc; + if( p==0 || p->xFunc==0 || p->nBusy<0 ) return 0; + rc = p->xFunc(p->pArg, p->nBusy); + if( rc==0 ){ + p->nBusy = -1; + }else{ + p->nBusy++; + } + return rc; +} + +/* +** This routine sets the busy callback for an Sqlite database to the +** given callback function with the given argument. 
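+**
+** A caller-supplied handler (illustrative sketch only) returns non-zero to
+** ask for another retry and zero to give up with SQLITE_BUSY:
+**
+**     static int myBusy(void *pArg, int nPrior){
+**       return nPrior<10;        /* retry at most 10 times */
+**     }
+**     sqlite3_busy_handler(db, myBusy, 0);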
+*/ +int sqlite3_busy_handler( + sqlite3 *db, + int (*xBusy)(void*,int), + void *pArg +){ + if( sqlite3SafetyCheck(db) ){ + return SQLITE_MISUSE; + } + sqlite3_mutex_enter(db->mutex); + db->busyHandler.xFunc = xBusy; + db->busyHandler.pArg = pArg; + db->busyHandler.nBusy = 0; + sqlite3_mutex_leave(db->mutex); + return SQLITE_OK; +} + +#ifndef SQLITE_OMIT_PROGRESS_CALLBACK +/* +** This routine sets the progress callback for an Sqlite database to the +** given callback function with the given argument. The progress callback will +** be invoked every nOps opcodes. +*/ +void sqlite3_progress_handler( + sqlite3 *db, + int nOps, + int (*xProgress)(void*), + void *pArg +){ + if( !sqlite3SafetyCheck(db) ){ + sqlite3_mutex_enter(db->mutex); + if( nOps>0 ){ + db->xProgress = xProgress; + db->nProgressOps = nOps; + db->pProgressArg = pArg; + }else{ + db->xProgress = 0; + db->nProgressOps = 0; + db->pProgressArg = 0; + } + sqlite3_mutex_leave(db->mutex); + } +} +#endif + + +/* +** This routine installs a default busy handler that waits for the +** specified number of milliseconds before returning 0. +*/ +int sqlite3_busy_timeout(sqlite3 *db, int ms){ + if( sqlite3SafetyCheck(db) ){ + return SQLITE_MISUSE; + } + if( ms>0 ){ + db->busyTimeout = ms; + sqlite3_busy_handler(db, sqliteDefaultBusyCallback, (void*)db); + }else{ + sqlite3_busy_handler(db, 0, 0); + } + return SQLITE_OK; +} + +/* +** Cause any pending operation to stop at its earliest opportunity. +*/ +void sqlite3_interrupt(sqlite3 *db){ + if( db && (db->magic==SQLITE_MAGIC_OPEN || db->magic==SQLITE_MAGIC_BUSY) ){ + db->u1.isInterrupted = 1; + } +} + + +/* +** This function is exactly the same as sqlite3_create_function(), except +** that it is designed to be called by internal code. The difference is +** that if a malloc() fails in sqlite3_create_function(), an error code +** is returned and the mallocFailed flag cleared. +*/ +int sqlite3CreateFunc( + sqlite3 *db, + const char *zFunctionName, + int nArg, + int enc, + void *pUserData, + void (*xFunc)(sqlite3_context*,int,sqlite3_value **), + void (*xStep)(sqlite3_context*,int,sqlite3_value **), + void (*xFinal)(sqlite3_context*) +){ + FuncDef *p; + int nName; + + assert( sqlite3_mutex_held(db->mutex) ); + if( sqlite3SafetyCheck(db) ){ + return SQLITE_MISUSE; + } + if( zFunctionName==0 || + (xFunc && (xFinal || xStep)) || + (!xFunc && (xFinal && !xStep)) || + (!xFunc && (!xFinal && xStep)) || + (nArg<-1 || nArg>127) || + (255<(nName = strlen(zFunctionName))) ){ + sqlite3Error(db, SQLITE_ERROR, "bad parameters"); + return SQLITE_ERROR; + } + +#ifndef SQLITE_OMIT_UTF16 + /* If SQLITE_UTF16 is specified as the encoding type, transform this + ** to one of SQLITE_UTF16LE or SQLITE_UTF16BE using the + ** SQLITE_UTF16NATIVE macro. SQLITE_UTF16 is not used internally. + ** + ** If SQLITE_ANY is specified, add three versions of the function + ** to the hash table. + */ + if( enc==SQLITE_UTF16 ){ + enc = SQLITE_UTF16NATIVE; + }else if( enc==SQLITE_ANY ){ + int rc; + rc = sqlite3CreateFunc(db, zFunctionName, nArg, SQLITE_UTF8, + pUserData, xFunc, xStep, xFinal); + if( rc==SQLITE_OK ){ + rc = sqlite3CreateFunc(db, zFunctionName, nArg, SQLITE_UTF16LE, + pUserData, xFunc, xStep, xFinal); + } + if( rc!=SQLITE_OK ){ + return rc; + } + enc = SQLITE_UTF16BE; + } +#else + enc = SQLITE_UTF8; +#endif + + /* Check if an existing function is being overridden or deleted. If so, + ** and there are active VMs, then return SQLITE_BUSY. 
If a function + ** is being overridden/deleted but there are no active VMs, allow the + ** operation to continue but invalidate all precompiled statements. + */ + p = sqlite3FindFunction(db, zFunctionName, nName, nArg, enc, 0); + if( p && p->iPrefEnc==enc && p->nArg==nArg ){ + if( db->activeVdbeCnt ){ + sqlite3Error(db, SQLITE_BUSY, + "Unable to delete/modify user-function due to active statements"); + assert( !db->mallocFailed ); + return SQLITE_BUSY; + }else{ + sqlite3ExpirePreparedStatements(db); + } + } + + p = sqlite3FindFunction(db, zFunctionName, nName, nArg, enc, 1); + assert(p || db->mallocFailed); + if( !p ){ + return SQLITE_NOMEM; + } + p->flags = 0; + p->xFunc = xFunc; + p->xStep = xStep; + p->xFinalize = xFinal; + p->pUserData = pUserData; + p->nArg = nArg; + return SQLITE_OK; +} + +/* +** Create new user functions. +*/ +int sqlite3_create_function( + sqlite3 *db, + const char *zFunctionName, + int nArg, + int enc, + void *p, + void (*xFunc)(sqlite3_context*,int,sqlite3_value **), + void (*xStep)(sqlite3_context*,int,sqlite3_value **), + void (*xFinal)(sqlite3_context*) +){ + int rc; + sqlite3_mutex_enter(db->mutex); + assert( !db->mallocFailed ); + rc = sqlite3CreateFunc(db, zFunctionName, nArg, enc, p, xFunc, xStep, xFinal); + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; +} + +#ifndef SQLITE_OMIT_UTF16 +int sqlite3_create_function16( + sqlite3 *db, + const void *zFunctionName, + int nArg, + int eTextRep, + void *p, + void (*xFunc)(sqlite3_context*,int,sqlite3_value**), + void (*xStep)(sqlite3_context*,int,sqlite3_value**), + void (*xFinal)(sqlite3_context*) +){ + int rc; + char *zFunc8; + sqlite3_mutex_enter(db->mutex); + assert( !db->mallocFailed ); + zFunc8 = sqlite3Utf16to8(db, zFunctionName, -1); + rc = sqlite3CreateFunc(db, zFunc8, nArg, eTextRep, p, xFunc, xStep, xFinal); + sqlite3_free(zFunc8); + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; +} +#endif + + +/* +** Declare that a function has been overloaded by a virtual table. +** +** If the function already exists as a regular global function, then +** this routine is a no-op. If the function does not exist, then create +** a new one that always throws a run-time error. +** +** When virtual tables intend to provide an overloaded function, they +** should call this routine to make sure the global function exists. +** A global function must exist in order for name resolution to work +** properly. +*/ +int sqlite3_overload_function( + sqlite3 *db, + const char *zName, + int nArg +){ + int nName = strlen(zName); + int rc; + sqlite3_mutex_enter(db->mutex); + if( sqlite3FindFunction(db, zName, nName, nArg, SQLITE_UTF8, 0)==0 ){ + sqlite3CreateFunc(db, zName, nArg, SQLITE_UTF8, + 0, sqlite3InvalidFunction, 0, 0); + } + rc = sqlite3ApiExit(db, SQLITE_OK); + sqlite3_mutex_leave(db->mutex); + return rc; +} + +#ifndef SQLITE_OMIT_TRACE +/* +** Register a trace function. The pArg from the previously registered trace +** is returned. +** +** A NULL trace function means that no tracing is executes. A non-NULL +** trace is a pointer to a function that is invoked at the start of each +** SQL statement. +*/ +void *sqlite3_trace(sqlite3 *db, void (*xTrace)(void*,const char*), void *pArg){ + void *pOld; + sqlite3_mutex_enter(db->mutex); + pOld = db->pTraceArg; + db->xTrace = xTrace; + db->pTraceArg = pArg; + sqlite3_mutex_leave(db->mutex); + return pOld; +} +/* +** Register a profile function. The pArg from the previously registered +** profile function is returned. 
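+**
+** For instance (an illustrative sketch only; see also the notes that
+** follow), a profiler that logs each completed statement together with the
+** elapsed time reported by SQLite could be registered as:
+**
+**     static void myProfile(void *p, const char *zSql, sqlite_uint64 iElapse){
+**       fprintf(stderr, "%llu %s\n", (unsigned long long)iElapse, zSql);
+**     }
+**     sqlite3_profile(db, myProfile, 0);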
+** +** A NULL profile function means that no profiling is executes. A non-NULL +** profile is a pointer to a function that is invoked at the conclusion of +** each SQL statement that is run. +*/ +void *sqlite3_profile( + sqlite3 *db, + void (*xProfile)(void*,const char*,sqlite_uint64), + void *pArg +){ + void *pOld; + sqlite3_mutex_enter(db->mutex); + pOld = db->pProfileArg; + db->xProfile = xProfile; + db->pProfileArg = pArg; + sqlite3_mutex_leave(db->mutex); + return pOld; +} +#endif /* SQLITE_OMIT_TRACE */ + +/*** EXPERIMENTAL *** +** +** Register a function to be invoked when a transaction comments. +** If the invoked function returns non-zero, then the commit becomes a +** rollback. +*/ +void *sqlite3_commit_hook( + sqlite3 *db, /* Attach the hook to this database */ + int (*xCallback)(void*), /* Function to invoke on each commit */ + void *pArg /* Argument to the function */ +){ + void *pOld; + sqlite3_mutex_enter(db->mutex); + pOld = db->pCommitArg; + db->xCommitCallback = xCallback; + db->pCommitArg = pArg; + sqlite3_mutex_leave(db->mutex); + return pOld; +} + +/* +** Register a callback to be invoked each time a row is updated, +** inserted or deleted using this database connection. +*/ +void *sqlite3_update_hook( + sqlite3 *db, /* Attach the hook to this database */ + void (*xCallback)(void*,int,char const *,char const *,sqlite_int64), + void *pArg /* Argument to the function */ +){ + void *pRet; + sqlite3_mutex_enter(db->mutex); + pRet = db->pUpdateArg; + db->xUpdateCallback = xCallback; + db->pUpdateArg = pArg; + sqlite3_mutex_leave(db->mutex); + return pRet; +} + +/* +** Register a callback to be invoked each time a transaction is rolled +** back by this database connection. +*/ +void *sqlite3_rollback_hook( + sqlite3 *db, /* Attach the hook to this database */ + void (*xCallback)(void*), /* Callback function */ + void *pArg /* Argument to the function */ +){ + void *pRet; + sqlite3_mutex_enter(db->mutex); + pRet = db->pRollbackArg; + db->xRollbackCallback = xCallback; + db->pRollbackArg = pArg; + sqlite3_mutex_leave(db->mutex); + return pRet; +} + +/* +** This routine is called to create a connection to a database BTree +** driver. If zFilename is the name of a file, then that file is +** opened and used. If zFilename is the magic name ":memory:" then +** the database is stored in memory (and is thus forgotten as soon as +** the connection is closed.) If zFilename is NULL then the database +** is a "virtual" database for transient use only and is deleted as +** soon as the connection is closed. 
+** +** A virtual database can be either a disk file (that is automatically +** deleted when the file is closed) or it an be held entirely in memory, +** depending on the values of the TEMP_STORE compile-time macro and the +** db->temp_store variable, according to the following chart: +** +** TEMP_STORE db->temp_store Location of temporary database +** ---------- -------------- ------------------------------ +** 0 any file +** 1 1 file +** 1 2 memory +** 1 0 file +** 2 1 file +** 2 2 memory +** 2 0 memory +** 3 any memory +*/ +int sqlite3BtreeFactory( + const sqlite3 *db, /* Main database when opening aux otherwise 0 */ + const char *zFilename, /* Name of the file containing the BTree database */ + int omitJournal, /* if TRUE then do not journal this file */ + int nCache, /* How many pages in the page cache */ + int vfsFlags, /* Flags passed through to vfsOpen */ + Btree **ppBtree /* Pointer to new Btree object written here */ +){ + int btFlags = 0; + int rc; + + assert( sqlite3_mutex_held(db->mutex) ); + assert( ppBtree != 0); + if( omitJournal ){ + btFlags |= BTREE_OMIT_JOURNAL; + } + if( db->flags & SQLITE_NoReadlock ){ + btFlags |= BTREE_NO_READLOCK; + } + if( zFilename==0 ){ +#if TEMP_STORE==0 + /* Do nothing */ +#endif +#ifndef SQLITE_OMIT_MEMORYDB +#if TEMP_STORE==1 + if( db->temp_store==2 ) zFilename = ":memory:"; +#endif +#if TEMP_STORE==2 + if( db->temp_store!=1 ) zFilename = ":memory:"; +#endif +#if TEMP_STORE==3 + zFilename = ":memory:"; +#endif +#endif /* SQLITE_OMIT_MEMORYDB */ + } + + if( (vfsFlags & SQLITE_OPEN_MAIN_DB)!=0 && (zFilename==0 || *zFilename==0) ){ + vfsFlags = (vfsFlags & ~SQLITE_OPEN_MAIN_DB) | SQLITE_OPEN_TEMP_DB; + } + rc = sqlite3BtreeOpen(zFilename, (sqlite3 *)db, ppBtree, btFlags, vfsFlags); + if( rc==SQLITE_OK ){ + sqlite3BtreeSetBusyHandler(*ppBtree, (void*)&db->busyHandler); + sqlite3BtreeSetCacheSize(*ppBtree, nCache); + } + return rc; +} + +/* +** Return UTF-8 encoded English language explanation of the most recent +** error. +*/ +const char *sqlite3_errmsg(sqlite3 *db){ + const char *z; + if( !db ){ + return sqlite3ErrStr(SQLITE_NOMEM); + } + if( sqlite3SafetyCheck(db) || db->errCode==SQLITE_MISUSE ){ + return sqlite3ErrStr(SQLITE_MISUSE); + } + sqlite3_mutex_enter(db->mutex); + assert( !db->mallocFailed ); + z = (char*)sqlite3_value_text(db->pErr); + if( z==0 ){ + z = sqlite3ErrStr(db->errCode); + } + sqlite3_mutex_leave(db->mutex); + return z; +} + +#ifndef SQLITE_OMIT_UTF16 +/* +** Return UTF-16 encoded English language explanation of the most recent +** error. +*/ +const void *sqlite3_errmsg16(sqlite3 *db){ + /* Because all the characters in the string are in the unicode + ** range 0x00-0xFF, if we pad the big-endian string with a + ** zero byte, we can obtain the little-endian string with + ** &big_endian[1]. 
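+  ** For example, "out of memory" is stored below as the UTF-16BE byte
+  ** sequence 00 'o' 00 'u' 00 't' ...; reading the same bytes starting at
+  ** offset 1 gives 'o' 00 'u' 00 't' 00 ..., which is the UTF-16LE encoding
+  ** of the same text, with the trailing extra zero byte completing the
+  ** terminator of the shifted string.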
+ */ + static const char outOfMemBe[] = { + 0, 'o', 0, 'u', 0, 't', 0, ' ', + 0, 'o', 0, 'f', 0, ' ', + 0, 'm', 0, 'e', 0, 'm', 0, 'o', 0, 'r', 0, 'y', 0, 0, 0 + }; + static const char misuseBe [] = { + 0, 'l', 0, 'i', 0, 'b', 0, 'r', 0, 'a', 0, 'r', 0, 'y', 0, ' ', + 0, 'r', 0, 'o', 0, 'u', 0, 't', 0, 'i', 0, 'n', 0, 'e', 0, ' ', + 0, 'c', 0, 'a', 0, 'l', 0, 'l', 0, 'e', 0, 'd', 0, ' ', + 0, 'o', 0, 'u', 0, 't', 0, ' ', + 0, 'o', 0, 'f', 0, ' ', + 0, 's', 0, 'e', 0, 'q', 0, 'u', 0, 'e', 0, 'n', 0, 'c', 0, 'e', 0, 0, 0 + }; + + const void *z; + if( !db ){ + return (void *)(&outOfMemBe[SQLITE_UTF16NATIVE==SQLITE_UTF16LE?1:0]); + } + if( sqlite3SafetyCheck(db) || db->errCode==SQLITE_MISUSE ){ + return (void *)(&misuseBe[SQLITE_UTF16NATIVE==SQLITE_UTF16LE?1:0]); + } + sqlite3_mutex_enter(db->mutex); + assert( !db->mallocFailed ); + z = sqlite3_value_text16(db->pErr); + if( z==0 ){ + sqlite3ValueSetStr(db->pErr, -1, sqlite3ErrStr(db->errCode), + SQLITE_UTF8, SQLITE_STATIC); + z = sqlite3_value_text16(db->pErr); + } + sqlite3ApiExit(0, 0); + sqlite3_mutex_leave(db->mutex); + return z; +} +#endif /* SQLITE_OMIT_UTF16 */ + +/* +** Return the most recent error code generated by an SQLite routine. If NULL is +** passed to this function, we assume a malloc() failed during sqlite3_open(). +*/ +int sqlite3_errcode(sqlite3 *db){ + if( !db || db->mallocFailed ){ + return SQLITE_NOMEM; + } + if( sqlite3SafetyCheck(db) ){ + return SQLITE_MISUSE; + } + return db->errCode & db->errMask; +} + +/* +** Create a new collating function for database "db". The name is zName +** and the encoding is enc. +*/ +static int createCollation( + sqlite3* db, + const char *zName, + int enc, + void* pCtx, + int(*xCompare)(void*,int,const void*,int,const void*), + void(*xDel)(void*) +){ + CollSeq *pColl; + int enc2; + + if( sqlite3SafetyCheck(db) ){ + return SQLITE_MISUSE; + } + assert( sqlite3_mutex_held(db->mutex) ); + + /* If SQLITE_UTF16 is specified as the encoding type, transform this + ** to one of SQLITE_UTF16LE or SQLITE_UTF16BE using the + ** SQLITE_UTF16NATIVE macro. SQLITE_UTF16 is not used internally. + */ + enc2 = enc & ~SQLITE_UTF16_ALIGNED; + if( enc2==SQLITE_UTF16 ){ + enc2 = SQLITE_UTF16NATIVE; + } + + if( (enc2&~3)!=0 ){ + sqlite3Error(db, SQLITE_ERROR, "unknown encoding"); + return SQLITE_ERROR; + } + + /* Check if this call is removing or replacing an existing collation + ** sequence. If so, and there are active VMs, return busy. If there + ** are no active VMs, invalidate any pre-compiled statements. + */ + pColl = sqlite3FindCollSeq(db, (u8)enc2, zName, strlen(zName), 0); + if( pColl && pColl->xCmp ){ + if( db->activeVdbeCnt ){ + sqlite3Error(db, SQLITE_BUSY, + "Unable to delete/modify collation sequence due to active statements"); + return SQLITE_BUSY; + } + sqlite3ExpirePreparedStatements(db); + + /* If collation sequence pColl was created directly by a call to + ** sqlite3_create_collation, and not generated by synthCollSeq(), + ** then any copies made by synthCollSeq() need to be invalidated. + ** Also, collation destructor - CollSeq.xDel() - function may need + ** to be called. 
+ */ + if( (pColl->enc & ~SQLITE_UTF16_ALIGNED)==enc2 ){ + CollSeq *aColl = sqlite3HashFind(&db->aCollSeq, zName, strlen(zName)); + int j; + for(j=0; j<3; j++){ + CollSeq *p = &aColl[j]; + if( p->enc==pColl->enc ){ + if( p->xDel ){ + p->xDel(p->pUser); + } + p->xCmp = 0; + } + } + } + } + + pColl = sqlite3FindCollSeq(db, (u8)enc2, zName, strlen(zName), 1); + if( pColl ){ + pColl->xCmp = xCompare; + pColl->pUser = pCtx; + pColl->xDel = xDel; + pColl->enc = enc2 | (enc & SQLITE_UTF16_ALIGNED); + } + sqlite3Error(db, SQLITE_OK, 0); + return SQLITE_OK; +} + + +/* +** This routine does the work of opening a database on behalf of +** sqlite3_open() and sqlite3_open16(). The database filename "zFilename" +** is UTF-8 encoded. +*/ +static int openDatabase( + const char *zFilename, /* Database filename UTF-8 encoded */ + sqlite3 **ppDb, /* OUT: Returned database handle */ + unsigned flags, /* Operational flags */ + const char *zVfs /* Name of the VFS to use */ +){ + sqlite3 *db; + int rc; + CollSeq *pColl; + + /* Allocate the sqlite data structure */ + db = sqlite3MallocZero( sizeof(sqlite3) ); + if( db==0 ) goto opendb_out; + db->mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_RECURSIVE); + if( db->mutex==0 ){ + sqlite3_free(db); + db = 0; + goto opendb_out; + } + sqlite3_mutex_enter(db->mutex); + db->errMask = 0xff; + db->priorNewRowid = 0; + db->nDb = 2; + db->magic = SQLITE_MAGIC_BUSY; + db->aDb = db->aDbStatic; + db->autoCommit = 1; + db->flags |= SQLITE_ShortColNames +#if SQLITE_DEFAULT_FILE_FORMAT<4 + | SQLITE_LegacyFileFmt +#endif +#ifdef SQLITE_ENABLE_LOAD_EXTENSION + | SQLITE_LoadExtension +#endif + ; + sqlite3HashInit(&db->aFunc, SQLITE_HASH_STRING, 0); + sqlite3HashInit(&db->aCollSeq, SQLITE_HASH_STRING, 0); +#ifndef SQLITE_OMIT_VIRTUALTABLE + sqlite3HashInit(&db->aModule, SQLITE_HASH_STRING, 0); +#endif + + db->pVfs = sqlite3_vfs_find(zVfs); + if( !db->pVfs ){ + rc = SQLITE_ERROR; + db->magic = SQLITE_MAGIC_CLOSED; + sqlite3Error(db, rc, "no such vfs: %s", (zVfs?zVfs:"(null)")); + goto opendb_out; + } + + /* Add the default collation sequence BINARY. BINARY works for both UTF-8 + ** and UTF-16, so add a version for each to avoid any unnecessary + ** conversions. The only error that can occur here is a malloc() failure. + */ + if( createCollation(db, "BINARY", SQLITE_UTF8, 0, binCollFunc, 0) || + createCollation(db, "BINARY", SQLITE_UTF16BE, 0, binCollFunc, 0) || + createCollation(db, "BINARY", SQLITE_UTF16LE, 0, binCollFunc, 0) || + (db->pDfltColl = sqlite3FindCollSeq(db, SQLITE_UTF8, "BINARY", 6, 0))==0 + ){ + assert( db->mallocFailed ); + db->magic = SQLITE_MAGIC_CLOSED; + goto opendb_out; + } + + /* Also add a UTF-8 case-insensitive collation sequence. */ + createCollation(db, "NOCASE", SQLITE_UTF8, 0, nocaseCollatingFunc, 0); + + /* Set flags on the built-in collating sequences */ + db->pDfltColl->type = SQLITE_COLL_BINARY; + pColl = sqlite3FindCollSeq(db, SQLITE_UTF8, "NOCASE", 6, 0); + if( pColl ){ + pColl->type = SQLITE_COLL_NOCASE; + } + + /* Open the backend database driver */ + db->openFlags = flags; + rc = sqlite3BtreeFactory(db, zFilename, 0, SQLITE_DEFAULT_CACHE_SIZE, + flags | SQLITE_OPEN_MAIN_DB, + &db->aDb[0].pBt); + if( rc!=SQLITE_OK ){ + sqlite3Error(db, rc, 0); + db->magic = SQLITE_MAGIC_CLOSED; + goto opendb_out; + } + db->aDb[0].pSchema = sqlite3SchemaGet(db, db->aDb[0].pBt); + db->aDb[1].pSchema = sqlite3SchemaGet(db, 0); + + + /* The default safety_level for the main database is 'full'; for the temp + ** database it is 'NONE'. This matches the pager layer defaults. 
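+  ** (In the assignments below, a safety_level of 3 selects 'full' for the
+  ** main database and 1 selects 'NONE' for the temp database.)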
+ */ + db->aDb[0].zName = "main"; + db->aDb[0].safety_level = 3; +#ifndef SQLITE_OMIT_TEMPDB + db->aDb[1].zName = "temp"; + db->aDb[1].safety_level = 1; +#endif + + db->magic = SQLITE_MAGIC_OPEN; + if( db->mallocFailed ){ + goto opendb_out; + } + + /* Register all built-in functions, but do not attempt to read the + ** database schema yet. This is delayed until the first time the database + ** is accessed. + */ + sqlite3Error(db, SQLITE_OK, 0); + sqlite3RegisterBuiltinFunctions(db); + + /* Load automatic extensions - extensions that have been registered + ** using the sqlite3_automatic_extension() API. + */ + (void)sqlite3AutoLoadExtensions(db); + if( sqlite3_errcode(db)!=SQLITE_OK ){ + goto opendb_out; + } + +#ifdef SQLITE_ENABLE_FTS1 + if( !db->mallocFailed ){ + extern int sqlite3Fts1Init(sqlite3*); + rc = sqlite3Fts1Init(db); + } +#endif + +#ifdef SQLITE_ENABLE_FTS2 + if( !db->mallocFailed && rc==SQLITE_OK ){ + extern int sqlite3Fts2Init(sqlite3*); + rc = sqlite3Fts2Init(db); + } +#endif + +#ifdef SQLITE_ENABLE_FTS3 + if( !db->mallocFailed && rc==SQLITE_OK ){ + extern int sqlite3Fts3Init(sqlite3*); + rc = sqlite3Fts3Init(db); + } +#endif + +#ifdef SQLITE_ENABLE_ICU + if( !db->mallocFailed && rc==SQLITE_OK ){ + extern int sqlite3IcuInit(sqlite3*); + rc = sqlite3IcuInit(db); + } +#endif + sqlite3Error(db, rc, 0); + + /* -DSQLITE_DEFAULT_LOCKING_MODE=1 makes EXCLUSIVE the default locking + ** mode. -DSQLITE_DEFAULT_LOCKING_MODE=0 make NORMAL the default locking + ** mode. Doing nothing at all also makes NORMAL the default. + */ +#ifdef SQLITE_DEFAULT_LOCKING_MODE + db->dfltLockMode = SQLITE_DEFAULT_LOCKING_MODE; + sqlite3PagerLockingMode(sqlite3BtreePager(db->aDb[0].pBt), + SQLITE_DEFAULT_LOCKING_MODE); +#endif + +opendb_out: + if( db && db->mutex ){ + sqlite3_mutex_leave(db->mutex); + } + if( SQLITE_NOMEM==(rc = sqlite3_errcode(db)) ){ + sqlite3_close(db); + db = 0; + } + *ppDb = db; + return sqlite3ApiExit(0, rc); +} + +/* +** Open a new database handle. +*/ +int sqlite3_open( + const char *zFilename, + sqlite3 **ppDb +){ + return openDatabase(zFilename, ppDb, + SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0); +} +int sqlite3_open_v2( + const char *filename, /* Database filename (UTF-8) */ + sqlite3 **ppDb, /* OUT: SQLite db handle */ + int flags, /* Flags */ + const char *zVfs /* Name of VFS module to use */ +){ + return openDatabase(filename, ppDb, flags, zVfs); +} + +#ifndef SQLITE_OMIT_UTF16 +/* +** Open a new database handle. +*/ +int sqlite3_open16( + const void *zFilename, + sqlite3 **ppDb +){ + char const *zFilename8; /* zFilename encoded in UTF-8 instead of UTF-16 */ + sqlite3_value *pVal; + int rc = SQLITE_NOMEM; + + assert( zFilename ); + assert( ppDb ); + *ppDb = 0; + pVal = sqlite3ValueNew(0); + sqlite3ValueSetStr(pVal, -1, zFilename, SQLITE_UTF16NATIVE, SQLITE_STATIC); + zFilename8 = sqlite3ValueText(pVal, SQLITE_UTF8); + if( zFilename8 ){ + rc = openDatabase(zFilename8, ppDb, + SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0); + if( rc==SQLITE_OK && *ppDb ){ + rc = sqlite3_exec(*ppDb, "PRAGMA encoding = 'UTF-16'", 0, 0, 0); + if( rc!=SQLITE_OK ){ + sqlite3_close(*ppDb); + *ppDb = 0; + } + } + } + sqlite3ValueFree(pVal); + + return sqlite3ApiExit(0, rc); +} +#endif /* SQLITE_OMIT_UTF16 */ + +/* +** Register a new collation sequence with the database handle db. 
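+**
+** An application-defined comparator (illustrative sketch only; the collation
+** name used is hypothetical) has the same shape as binCollFunc() above.  For
+** example, a collation that sorts in reverse binary order could be
+** registered like this:
+**
+**     static int revColl(void *pNotUsed, int n1, const void *z1,
+**                                        int n2, const void *z2){
+**       int n = n1<n2 ? n1 : n2;
+**       int rc = memcmp(z2, z1, n);      /* arguments deliberately swapped */
+**       if( rc==0 ) rc = n2 - n1;
+**       return rc;
+**     }
+**     sqlite3_create_collation(db, "REVBIN", SQLITE_UTF8, 0, revColl);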
+*/ +int sqlite3_create_collation( + sqlite3* db, + const char *zName, + int enc, + void* pCtx, + int(*xCompare)(void*,int,const void*,int,const void*) +){ + int rc; + sqlite3_mutex_enter(db->mutex); + assert( !db->mallocFailed ); + rc = createCollation(db, zName, enc, pCtx, xCompare, 0); + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; +} + +/* +** Register a new collation sequence with the database handle db. +*/ +int sqlite3_create_collation_v2( + sqlite3* db, + const char *zName, + int enc, + void* pCtx, + int(*xCompare)(void*,int,const void*,int,const void*), + void(*xDel)(void*) +){ + int rc; + sqlite3_mutex_enter(db->mutex); + assert( !db->mallocFailed ); + rc = createCollation(db, zName, enc, pCtx, xCompare, xDel); + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; +} + +#ifndef SQLITE_OMIT_UTF16 +/* +** Register a new collation sequence with the database handle db. +*/ +int sqlite3_create_collation16( + sqlite3* db, + const char *zName, + int enc, + void* pCtx, + int(*xCompare)(void*,int,const void*,int,const void*) +){ + int rc = SQLITE_OK; + char *zName8; + sqlite3_mutex_enter(db->mutex); + assert( !db->mallocFailed ); + zName8 = sqlite3Utf16to8(db, zName, -1); + if( zName8 ){ + rc = createCollation(db, zName8, enc, pCtx, xCompare, 0); + sqlite3_free(zName8); + } + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; +} +#endif /* SQLITE_OMIT_UTF16 */ + +/* +** Register a collation sequence factory callback with the database handle +** db. Replace any previously installed collation sequence factory. +*/ +int sqlite3_collation_needed( + sqlite3 *db, + void *pCollNeededArg, + void(*xCollNeeded)(void*,sqlite3*,int eTextRep,const char*) +){ + if( sqlite3SafetyCheck(db) ){ + return SQLITE_MISUSE; + } + sqlite3_mutex_enter(db->mutex); + db->xCollNeeded = xCollNeeded; + db->xCollNeeded16 = 0; + db->pCollNeededArg = pCollNeededArg; + sqlite3_mutex_leave(db->mutex); + return SQLITE_OK; +} + +#ifndef SQLITE_OMIT_UTF16 +/* +** Register a collation sequence factory callback with the database handle +** db. Replace any previously installed collation sequence factory. +*/ +int sqlite3_collation_needed16( + sqlite3 *db, + void *pCollNeededArg, + void(*xCollNeeded16)(void*,sqlite3*,int eTextRep,const void*) +){ + if( sqlite3SafetyCheck(db) ){ + return SQLITE_MISUSE; + } + sqlite3_mutex_enter(db->mutex); + db->xCollNeeded = 0; + db->xCollNeeded16 = xCollNeeded16; + db->pCollNeededArg = pCollNeededArg; + sqlite3_mutex_leave(db->mutex); + return SQLITE_OK; +} +#endif /* SQLITE_OMIT_UTF16 */ + +#ifndef SQLITE_OMIT_GLOBALRECOVER +/* +** This function is now an anachronism. It used to be used to recover from a +** malloc() failure, but SQLite now does this automatically. +*/ +int sqlite3_global_recover(){ + return SQLITE_OK; +} +#endif + +/* +** Test to see whether or not the database connection is in autocommit +** mode. Return TRUE if it is and FALSE if not. Autocommit mode is on +** by default. Autocommit is disabled by a BEGIN statement and reenabled +** by the next COMMIT or ROLLBACK. +** +******* THIS IS AN EXPERIMENTAL API AND IS SUBJECT TO CHANGE ****** +*/ +int sqlite3_get_autocommit(sqlite3 *db){ + return db->autoCommit; +} + +#ifdef SQLITE_DEBUG +/* +** The following routine is subtituted for constant SQLITE_CORRUPT in +** debugging builds. This provides a way to set a breakpoint for when +** corruption is first detected. 
+*/ +int sqlite3Corrupt(void){ + return SQLITE_CORRUPT; +} +#endif + +/* +** This is a convenience routine that makes sure that all thread-specific +** data for this thread has been deallocated. +** +** SQLite no longer uses thread-specific data so this routine is now a +** no-op. It is retained for historical compatibility. +*/ +void sqlite3_thread_cleanup(void){ +} + +/* +** Return meta information about a specific column of a database table. +** See comment in sqlite3.h (sqlite.h.in) for details. +*/ +#ifdef SQLITE_ENABLE_COLUMN_METADATA +int sqlite3_table_column_metadata( + sqlite3 *db, /* Connection handle */ + const char *zDbName, /* Database name or NULL */ + const char *zTableName, /* Table name */ + const char *zColumnName, /* Column name */ + char const **pzDataType, /* OUTPUT: Declared data type */ + char const **pzCollSeq, /* OUTPUT: Collation sequence name */ + int *pNotNull, /* OUTPUT: True if NOT NULL constraint exists */ + int *pPrimaryKey, /* OUTPUT: True if column part of PK */ + int *pAutoinc /* OUTPUT: True if colums is auto-increment */ +){ + int rc; + char *zErrMsg = 0; + Table *pTab = 0; + Column *pCol = 0; + int iCol; + + char const *zDataType = 0; + char const *zCollSeq = 0; + int notnull = 0; + int primarykey = 0; + int autoinc = 0; + + /* Ensure the database schema has been loaded */ + if( sqlite3SafetyOn(db) ){ + return SQLITE_MISUSE; + } + sqlite3_mutex_enter(db->mutex); + rc = sqlite3Init(db, &zErrMsg); + if( SQLITE_OK!=rc ){ + goto error_out; + } + + /* Locate the table in question */ + pTab = sqlite3FindTable(db, zTableName, zDbName); + if( !pTab || pTab->pSelect ){ + pTab = 0; + goto error_out; + } + + /* Find the column for which info is requested */ + if( sqlite3IsRowid(zColumnName) ){ + iCol = pTab->iPKey; + if( iCol>=0 ){ + pCol = &pTab->aCol[iCol]; + } + }else{ + for(iCol=0; iColnCol; iCol++){ + pCol = &pTab->aCol[iCol]; + if( 0==sqlite3StrICmp(pCol->zName, zColumnName) ){ + break; + } + } + if( iCol==pTab->nCol ){ + pTab = 0; + goto error_out; + } + } + + /* The following block stores the meta information that will be returned + ** to the caller in local variables zDataType, zCollSeq, notnull, primarykey + ** and autoinc. At this point there are two possibilities: + ** + ** 1. The specified column name was rowid", "oid" or "_rowid_" + ** and there is no explicitly declared IPK column. + ** + ** 2. The table is not a view and the column name identified an + ** explicitly declared column. Copy meta information from *pCol. + */ + if( pCol ){ + zDataType = pCol->zType; + zCollSeq = pCol->zColl; + notnull = (pCol->notNull?1:0); + primarykey = (pCol->isPrimKey?1:0); + autoinc = ((pTab->iPKey==iCol && pTab->autoInc)?1:0); + }else{ + zDataType = "INTEGER"; + primarykey = 1; + } + if( !zCollSeq ){ + zCollSeq = "BINARY"; + } + +error_out: + if( sqlite3SafetyOff(db) ){ + rc = SQLITE_MISUSE; + } + + /* Whether the function call succeeded or failed, set the output parameters + ** to whatever their local counterparts contain. If an error did occur, + ** this has the effect of zeroing all output parameters. 
+ */ + if( pzDataType ) *pzDataType = zDataType; + if( pzCollSeq ) *pzCollSeq = zCollSeq; + if( pNotNull ) *pNotNull = notnull; + if( pPrimaryKey ) *pPrimaryKey = primarykey; + if( pAutoinc ) *pAutoinc = autoinc; + + if( SQLITE_OK==rc && !pTab ){ + sqlite3SetString(&zErrMsg, "no such table column: ", zTableName, ".", + zColumnName, 0); + rc = SQLITE_ERROR; + } + sqlite3Error(db, rc, (zErrMsg?"%s":0), zErrMsg); + sqlite3_free(zErrMsg); + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; +} +#endif + +/* +** Sleep for a little while. Return the amount of time slept. +*/ +int sqlite3_sleep(int ms){ + sqlite3_vfs *pVfs; + int rc; + pVfs = sqlite3_vfs_find(0); + + /* This function works in milliseconds, but the underlying OsSleep() + ** API uses microseconds. Hence the 1000's. + */ + rc = (sqlite3OsSleep(pVfs, 1000*ms)/1000); + return rc; +} + +/* +** Enable or disable the extended result codes. +*/ +int sqlite3_extended_result_codes(sqlite3 *db, int onoff){ + sqlite3_mutex_enter(db->mutex); + db->errMask = onoff ? 0xffffffff : 0xff; + sqlite3_mutex_leave(db->mutex); + return SQLITE_OK; +} + +/* +** Invoke the xFileControl method on a particular database. +*/ +int sqlite3_file_control(sqlite3 *db, const char *zDbName, int op, void *pArg){ + int rc = SQLITE_ERROR; + int iDb; + sqlite3_mutex_enter(db->mutex); + if( zDbName==0 ){ + iDb = 0; + }else{ + for(iDb=0; iDbnDb; iDb++){ + if( strcmp(db->aDb[iDb].zName, zDbName)==0 ) break; + } + } + if( iDbnDb ){ + Btree *pBtree = db->aDb[iDb].pBt; + if( pBtree ){ + Pager *pPager; + sqlite3BtreeEnter(pBtree); + pPager = sqlite3BtreePager(pBtree); + if( pPager ){ + sqlite3_file *fd = sqlite3PagerFile(pPager); + if( fd ){ + rc = sqlite3OsFileControl(fd, op, pArg); + } + } + sqlite3BtreeLeave(pBtree); + } + } + sqlite3_mutex_leave(db->mutex); + return rc; +} diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/malloc.c b/libraries/sqlite/unix/sqlite-3.5.1/src/malloc.c new file mode 100644 index 0000000000..bc321ab9ae --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/malloc.c @@ -0,0 +1,240 @@ +/* +** 2001 September 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** Memory allocation functions used throughout sqlite. +** +** +** $Id: malloc.c,v 1.13 2007/08/29 14:06:23 danielk1977 Exp $ +*/ +#include "sqliteInt.h" +#include +#include + +/* +** This routine runs when the memory allocator sees that the +** total memory allocation is about to exceed the soft heap +** limit. +*/ +static void softHeapLimitEnforcer( + void *NotUsed, + sqlite3_int64 inUse, + int allocSize +){ + sqlite3_release_memory(allocSize); +} + +/* +** Set the soft heap-size limit for the current thread. Passing a +** zero or negative value indicates no limit. +*/ +void sqlite3_soft_heap_limit(int n){ + sqlite3_uint64 iLimit; + int overage; + if( n<0 ){ + iLimit = 0; + }else{ + iLimit = n; + } + if( iLimit>0 ){ + sqlite3_memory_alarm(softHeapLimitEnforcer, 0, iLimit); + }else{ + sqlite3_memory_alarm(0, 0, 0); + } + overage = sqlite3_memory_used() - n; + if( overage>0 ){ + sqlite3_release_memory(overage); + } +} + +/* +** Release memory held by SQLite instances created by the current thread. 
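+**
+** As an illustration only, an application that wants SQLite to try to keep
+** its heap usage under roughly eight megabytes could call
+**
+**     sqlite3_soft_heap_limit(8*1024*1024);
+**
+** after which allocations that push past the threshold fire the
+** softHeapLimitEnforcer() alarm above, which in turn calls
+** sqlite3_release_memory().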
+*/ +int sqlite3_release_memory(int n){ +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT + return sqlite3PagerReleaseMemory(n); +#else + return SQLITE_OK; +#endif +} + + +/* +** Allocate and zero memory. +*/ +void *sqlite3MallocZero(unsigned n){ + void *p = sqlite3_malloc(n); + if( p ){ + memset(p, 0, n); + } + return p; +} + +/* +** Allocate and zero memory. If the allocation fails, make +** the mallocFailed flag in the connection pointer. +*/ +void *sqlite3DbMallocZero(sqlite3 *db, unsigned n){ + void *p = sqlite3DbMallocRaw(db, n); + if( p ){ + memset(p, 0, n); + } + return p; +} + +/* +** Allocate and zero memory. If the allocation fails, make +** the mallocFailed flag in the connection pointer. +*/ +void *sqlite3DbMallocRaw(sqlite3 *db, unsigned n){ + void *p = 0; + if( !db || db->mallocFailed==0 ){ + p = sqlite3_malloc(n); + if( !p && db ){ + db->mallocFailed = 1; + } + } + return p; +} + +/* +** Resize the block of memory pointed to by p to n bytes. If the +** resize fails, set the mallocFailed flag inthe connection object. +*/ +void *sqlite3DbRealloc(sqlite3 *db, void *p, int n){ + void *pNew = 0; + if( db->mallocFailed==0 ){ + pNew = sqlite3_realloc(p, n); + if( !pNew ){ + db->mallocFailed = 1; + } + } + return pNew; +} + +/* +** Attempt to reallocate p. If the reallocation fails, then free p +** and set the mallocFailed flag in the database connection. +*/ +void *sqlite3DbReallocOrFree(sqlite3 *db, void *p, int n){ + void *pNew; + pNew = sqlite3DbRealloc(db, p, n); + if( !pNew ){ + sqlite3_free(p); + } + return pNew; +} + +/* +** Make a copy of a string in memory obtained from sqliteMalloc(). These +** functions call sqlite3MallocRaw() directly instead of sqliteMalloc(). This +** is because when memory debugging is turned on, these two functions are +** called via macros that record the current file and line number in the +** ThreadData structure. +*/ +char *sqlite3StrDup(const char *z){ + char *zNew; + int n; + if( z==0 ) return 0; + n = strlen(z)+1; + zNew = sqlite3_malloc(n); + if( zNew ) memcpy(zNew, z, n); + return zNew; +} +char *sqlite3StrNDup(const char *z, int n){ + char *zNew; + if( z==0 ) return 0; + zNew = sqlite3_malloc(n+1); + if( zNew ){ + memcpy(zNew, z, n); + zNew[n] = 0; + } + return zNew; +} + +char *sqlite3DbStrDup(sqlite3 *db, const char *z){ + char *zNew = sqlite3StrDup(z); + if( z && !zNew ){ + db->mallocFailed = 1; + } + return zNew; +} +char *sqlite3DbStrNDup(sqlite3 *db, const char *z, int n){ + char *zNew = sqlite3StrNDup(z, n); + if( z && !zNew ){ + db->mallocFailed = 1; + } + return zNew; +} + +/* +** Create a string from the 2nd and subsequent arguments (up to the +** first NULL argument), store the string in memory obtained from +** sqliteMalloc() and make the pointer indicated by the 1st argument +** point to that string. The 1st argument must either be NULL or +** point to memory obtained from sqliteMalloc(). +*/ +void sqlite3SetString(char **pz, ...){ + va_list ap; + int nByte; + const char *z; + char *zResult; + + assert( pz!=0 ); + nByte = 1; + va_start(ap, pz); + while( (z = va_arg(ap, const char*))!=0 ){ + nByte += strlen(z); + } + va_end(ap); + sqlite3_free(*pz); + *pz = zResult = sqlite3_malloc(nByte); + if( zResult==0 ){ + return; + } + *zResult = 0; + va_start(ap, pz); + while( (z = va_arg(ap, const char*))!=0 ){ + int n = strlen(z); + memcpy(zResult, z, n); + zResult += n; + } + zResult[0] = 0; + va_end(ap); +} + + +/* +** This function must be called before exiting any API function (i.e. 
+** returning control to the user) that has called sqlite3_malloc or +** sqlite3_realloc. +** +** The returned value is normally a copy of the second argument to this +** function. However, if a malloc() failure has occured since the previous +** invocation SQLITE_NOMEM is returned instead. +** +** If the first argument, db, is not NULL and a malloc() error has occured, +** then the connection error-code (the value returned by sqlite3_errcode()) +** is set to SQLITE_NOMEM. +*/ +int sqlite3ApiExit(sqlite3* db, int rc){ + /* If the db handle is not NULL, then we must hold the connection handle + ** mutex here. Otherwise the read (and possible write) of db->mallocFailed + ** is unsafe, as is the call to sqlite3Error(). + */ + assert( !db || sqlite3_mutex_held(db->mutex) ); + if( db && db->mallocFailed ){ + sqlite3Error(db, SQLITE_NOMEM, 0); + db->mallocFailed = 0; + rc = SQLITE_NOMEM; + } + return rc & (db ? db->errMask : 0xff); +} + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/mem1.c b/libraries/sqlite/unix/sqlite-3.5.1/src/mem1.c new file mode 100644 index 0000000000..1e9fcfa145 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/mem1.c @@ -0,0 +1,229 @@ +/* +** 2007 August 14 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the C functions that implement a memory +** allocation subsystem for use by SQLite. +** +** $Id: mem1.c,v 1.10 2007/09/02 17:50:35 drh Exp $ +*/ + +/* +** This version of the memory allocator is the default. It is +** used when no other memory allocator is specified using compile-time +** macros. +*/ +#if !defined(SQLITE_MEMDEBUG) && !defined(SQLITE_OMIT_MEMORY_ALLOCATION) + +/* +** We will eventually construct multiple memory allocation subsystems +** suitable for use in various contexts: +** +** * Normal multi-threaded builds +** * Normal single-threaded builds +** * Debugging builds +** +** This initial version is suitable for use in normal multi-threaded +** builds. We envision that alternative versions will be stored in +** separate source files. #ifdefs will be used to select the code from +** one of the various memN.c source files for use in any given build. +*/ +#include "sqliteInt.h" + +/* +** All of the static variables used by this module are collected +** into a single structure named "mem". This is to keep the +** static variables organized and to reduce namespace pollution +** when this module is combined with other in the amalgamation. +*/ +static struct { + /* + ** The alarm callback and its arguments. The mem.mutex lock will + ** be held while the callback is running. Recursive calls into + ** the memory subsystem are allowed, but no new callbacks will be + ** issued. The alarmBusy variable is set to prevent recursive + ** callbacks. + */ + sqlite3_int64 alarmThreshold; + void (*alarmCallback)(void*, sqlite3_int64,int); + void *alarmArg; + int alarmBusy; + + /* + ** Mutex to control access to the memory allocation subsystem. + */ + sqlite3_mutex *mutex; + + /* + ** Current allocation and high-water mark. + */ + sqlite3_int64 nowUsed; + sqlite3_int64 mxUsed; + + +} mem; + +/* +** Enter the mutex mem.mutex. Allocate it if it is not already allocated. 
+*/ +static void enterMem(void){ + if( mem.mutex==0 ){ + mem.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MEM); + } + sqlite3_mutex_enter(mem.mutex); +} + +/* +** Return the amount of memory currently checked out. +*/ +sqlite3_int64 sqlite3_memory_used(void){ + sqlite3_int64 n; + enterMem(); + n = mem.nowUsed; + sqlite3_mutex_leave(mem.mutex); + return n; +} + +/* +** Return the maximum amount of memory that has ever been +** checked out since either the beginning of this process +** or since the most recent reset. +*/ +sqlite3_int64 sqlite3_memory_highwater(int resetFlag){ + sqlite3_int64 n; + enterMem(); + n = mem.mxUsed; + if( resetFlag ){ + mem.mxUsed = mem.nowUsed; + } + sqlite3_mutex_leave(mem.mutex); + return n; +} + +/* +** Change the alarm callback +*/ +int sqlite3_memory_alarm( + void(*xCallback)(void *pArg, sqlite3_int64 used,int N), + void *pArg, + sqlite3_int64 iThreshold +){ + enterMem(); + mem.alarmCallback = xCallback; + mem.alarmArg = pArg; + mem.alarmThreshold = iThreshold; + sqlite3_mutex_leave(mem.mutex); + return SQLITE_OK; +} + +/* +** Trigger the alarm +*/ +static void sqlite3MemsysAlarm(int nByte){ + void (*xCallback)(void*,sqlite3_int64,int); + sqlite3_int64 nowUsed; + void *pArg; + if( mem.alarmCallback==0 || mem.alarmBusy ) return; + mem.alarmBusy = 1; + xCallback = mem.alarmCallback; + nowUsed = mem.nowUsed; + pArg = mem.alarmArg; + sqlite3_mutex_leave(mem.mutex); + xCallback(pArg, nowUsed, nByte); + sqlite3_mutex_enter(mem.mutex); + mem.alarmBusy = 0; +} + +/* +** Allocate nBytes of memory +*/ +void *sqlite3_malloc(int nBytes){ + sqlite3_int64 *p = 0; + if( nBytes>0 ){ + enterMem(); + if( mem.alarmCallback!=0 && mem.nowUsed+nBytes>=mem.alarmThreshold ){ + sqlite3MemsysAlarm(nBytes); + } + p = malloc(nBytes+8); + if( p==0 ){ + sqlite3MemsysAlarm(nBytes); + p = malloc(nBytes+8); + } + if( p ){ + p[0] = nBytes; + p++; + mem.nowUsed += nBytes; + if( mem.nowUsed>mem.mxUsed ){ + mem.mxUsed = mem.nowUsed; + } + } + sqlite3_mutex_leave(mem.mutex); + } + return (void*)p; +} + +/* +** Free memory. +*/ +void sqlite3_free(void *pPrior){ + sqlite3_int64 *p; + int nByte; + if( pPrior==0 ){ + return; + } + assert( mem.mutex!=0 ); + p = pPrior; + p--; + nByte = (int)*p; + sqlite3_mutex_enter(mem.mutex); + mem.nowUsed -= nByte; + free(p); + sqlite3_mutex_leave(mem.mutex); +} + +/* +** Change the size of an existing memory allocation +*/ +void *sqlite3_realloc(void *pPrior, int nBytes){ + int nOld; + sqlite3_int64 *p; + if( pPrior==0 ){ + return sqlite3_malloc(nBytes); + } + if( nBytes<=0 ){ + sqlite3_free(pPrior); + return 0; + } + p = pPrior; + p--; + nOld = (int)p[0]; + assert( mem.mutex!=0 ); + sqlite3_mutex_enter(mem.mutex); + if( mem.nowUsed+nBytes-nOld>=mem.alarmThreshold ){ + sqlite3MemsysAlarm(nBytes-nOld); + } + p = realloc(p, nBytes+8); + if( p==0 ){ + sqlite3MemsysAlarm(nBytes); + p = realloc(p, nBytes+8); + } + if( p ){ + p[0] = nBytes; + p++; + mem.nowUsed += nBytes-nOld; + if( mem.nowUsed>mem.mxUsed ){ + mem.mxUsed = mem.nowUsed; + } + } + sqlite3_mutex_leave(mem.mutex); + return (void*)p; +} + +#endif /* !SQLITE_MEMDEBUG && !SQLITE_OMIT_MEMORY_ALLOCATION */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/mem2.c b/libraries/sqlite/unix/sqlite-3.5.1/src/mem2.c new file mode 100644 index 0000000000..7c509fff49 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/mem2.c @@ -0,0 +1,546 @@ +/* +** 2007 August 15 +** +** The author disclaims copyright to this source code. 
In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the C functions that implement a memory +** allocation subsystem for use by SQLite. +** +** $Id: mem2.c,v 1.14 2007/10/03 08:46:45 danielk1977 Exp $ +*/ + +/* +** This version of the memory allocator is used only if the +** SQLITE_MEMDEBUG macro is defined and SQLITE_OMIT_MEMORY_ALLOCATION +** is not defined. +*/ +#if defined(SQLITE_MEMDEBUG) && !defined(SQLITE_OMIT_MEMORY_ALLOCATION) + +/* +** We will eventually construct multiple memory allocation subsystems +** suitable for use in various contexts: +** +** * Normal multi-threaded builds +** * Normal single-threaded builds +** * Debugging builds +** +** This version is suitable for use in debugging builds. +** +** Features: +** +** * Every allocate has guards at both ends. +** * New allocations are initialized with randomness +** * Allocations are overwritten with randomness when freed +** * Optional logs of malloc activity generated +** * Summary of outstanding allocations with backtraces to the +** point of allocation. +** * The ability to simulate memory allocation failure +*/ +#include "sqliteInt.h" +#include + +/* +** The backtrace functionality is only available with GLIBC +*/ +#ifdef __GLIBC__ + extern int backtrace(void**,int); + extern void backtrace_symbols_fd(void*const*,int,int); +#else +# define backtrace(A,B) 0 +# define backtrace_symbols_fd(A,B,C) +#endif + +/* +** Each memory allocation looks like this: +** +** ------------------------------------------------------------------------ +** | Title | backtrace pointers | MemBlockHdr | allocation | EndGuard | +** ------------------------------------------------------------------------ +** +** The application code sees only a pointer to the allocation. We have +** to back up from the allocation pointer to find the MemBlockHdr. The +** MemBlockHdr tells us the size of the allocation and the number of +** backtrace pointers. There is also a guard word at the end of the +** MemBlockHdr. +*/ +struct MemBlockHdr { + struct MemBlockHdr *pNext, *pPrev; /* Linked list of all unfreed memory */ + int iSize; /* Size of this allocation */ + char nBacktrace; /* Number of backtraces on this alloc */ + char nBacktraceSlots; /* Available backtrace slots */ + short nTitle; /* Bytes of title; includes '\0' */ + int iForeGuard; /* Guard word for sanity */ +}; + +/* +** Guard words +*/ +#define FOREGUARD 0x80F5E153 +#define REARGUARD 0xE4676B53 + +/* +** All of the static variables used by this module are collected +** into a single structure named "mem". This is to keep the +** static variables organized and to reduce namespace pollution +** when this module is combined with other in the amalgamation. +*/ +static struct { + /* + ** The alarm callback and its arguments. The mem.mutex lock will + ** be held while the callback is running. Recursive calls into + ** the memory subsystem are allowed, but no new callbacks will be + ** issued. The alarmBusy variable is set to prevent recursive + ** callbacks. + */ + sqlite3_int64 alarmThreshold; + void (*alarmCallback)(void*, sqlite3_int64, int); + void *alarmArg; + int alarmBusy; + + /* + ** Mutex to control access to the memory allocation subsystem. + */ + sqlite3_mutex *mutex; + + /* + ** Current allocation and high-water mark. 
+ */ + sqlite3_int64 nowUsed; + sqlite3_int64 mxUsed; + + /* + ** Head and tail of a linked list of all outstanding allocations + */ + struct MemBlockHdr *pFirst; + struct MemBlockHdr *pLast; + + /* + ** The number of levels of backtrace to save in new allocations. + */ + int nBacktrace; + + /* + ** Title text to insert in front of each block + */ + int nTitle; /* Bytes of zTitle to save. Includes '\0' and padding */ + char zTitle[100]; /* The title text */ + + /* + ** These values are used to simulate malloc failures. When + ** iFail is 1, simulate a malloc failures and reset the value + ** to iReset. + */ + int iFail; /* Decrement and fail malloc when this is 1 */ + int iReset; /* When malloc fails set iiFail to this value */ + int iFailCnt; /* Number of failures */ + int iBenignFailCnt; /* Number of benign failures */ + int iNextIsBenign; /* True if the next call to malloc may fail benignly */ + int iIsBenign; /* All malloc calls may fail benignly */ + + /* + ** sqlite3MallocDisallow() increments the following counter. + ** sqlite3MallocAllow() decrements it. + */ + int disallow; /* Do not allow memory allocation */ + + +} mem; + + +/* +** Enter the mutex mem.mutex. Allocate it if it is not already allocated. +*/ +static void enterMem(void){ + if( mem.mutex==0 ){ + mem.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MEM); + } + sqlite3_mutex_enter(mem.mutex); +} + +/* +** Return the amount of memory currently checked out. +*/ +sqlite3_int64 sqlite3_memory_used(void){ + sqlite3_int64 n; + enterMem(); + n = mem.nowUsed; + sqlite3_mutex_leave(mem.mutex); + return n; +} + +/* +** Return the maximum amount of memory that has ever been +** checked out since either the beginning of this process +** or since the most recent reset. +*/ +sqlite3_int64 sqlite3_memory_highwater(int resetFlag){ + sqlite3_int64 n; + enterMem(); + n = mem.mxUsed; + if( resetFlag ){ + mem.mxUsed = mem.nowUsed; + } + sqlite3_mutex_leave(mem.mutex); + return n; +} + +/* +** Change the alarm callback +*/ +int sqlite3_memory_alarm( + void(*xCallback)(void *pArg, sqlite3_int64 used, int N), + void *pArg, + sqlite3_int64 iThreshold +){ + enterMem(); + mem.alarmCallback = xCallback; + mem.alarmArg = pArg; + mem.alarmThreshold = iThreshold; + sqlite3_mutex_leave(mem.mutex); + return SQLITE_OK; +} + +/* +** Trigger the alarm +*/ +static void sqlite3MemsysAlarm(int nByte){ + void (*xCallback)(void*,sqlite3_int64,int); + sqlite3_int64 nowUsed; + void *pArg; + if( mem.alarmCallback==0 || mem.alarmBusy ) return; + mem.alarmBusy = 1; + xCallback = mem.alarmCallback; + nowUsed = mem.nowUsed; + pArg = mem.alarmArg; + sqlite3_mutex_leave(mem.mutex); + xCallback(pArg, nowUsed, nByte); + sqlite3_mutex_enter(mem.mutex); + mem.alarmBusy = 0; +} + +/* +** Given an allocation, find the MemBlockHdr for that allocation. +** +** This routine checks the guards at either end of the allocation and +** if they are incorrect it asserts. +*/ +static struct MemBlockHdr *sqlite3MemsysGetHeader(void *pAllocation){ + struct MemBlockHdr *p; + int *pInt; + + p = (struct MemBlockHdr*)pAllocation; + p--; + assert( p->iForeGuard==FOREGUARD ); + assert( (p->iSize & 3)==0 ); + pInt = (int*)pAllocation; + assert( pInt[p->iSize/sizeof(int)]==REARGUARD ); + return p; +} + +/* +** This routine is called once the first time a simulated memory +** failure occurs. The sole purpose of this routine is to provide +** a convenient place to set a debugger breakpoint when debugging +** errors related to malloc() failures. 
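
The layout sketched above (title, backtrace slots, MemBlockHdr, user bytes, rear guard) fixes how much raw memory each debug allocation consumes. The following self-contained sketch (hypothetical values; struct Hdr simply mirrors MemBlockHdr and no SQLite headers are needed) reproduces that arithmetic: the request is rounded up to a 4-byte multiple, and the rear guard word sits immediately after the rounded user area:

#include <stdio.h>

/* Shape of the per-allocation header, mirroring struct MemBlockHdr above. */
struct Hdr { void *pNext, *pPrev; int iSize; char nBacktrace;
             char nBacktraceSlots; short nTitle; int iForeGuard; };

int main(void){
  int nByte = 21;                     /* requested size */
  int nBacktrace = 2;                 /* example: two saved return addresses */
  int nTitle = 8;                     /* example: title text plus padding */
  int totalSize;

  nByte = (nByte+3)&~3;               /* round up to a multiple of 4 -> 24 */
  totalSize = nByte + (int)sizeof(struct Hdr) + (int)sizeof(int)
            + nBacktrace*(int)sizeof(void*) + nTitle;
  printf("user bytes=%d, raw bytes=%d, rear guard at int index %d\n",
         nByte, totalSize, nByte/(int)sizeof(int));
  return 0;
}
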
+*/ +static void sqlite3MemsysFailed(void){ + mem.iFailCnt = 0; + mem.iBenignFailCnt = 0; +} + +/* +** Allocate nByte bytes of memory. +*/ +void *sqlite3_malloc(int nByte){ + struct MemBlockHdr *pHdr; + void **pBt; + char *z; + int *pInt; + void *p = 0; + int totalSize; + + if( nByte>0 ){ + enterMem(); + assert( mem.disallow==0 ); + if( mem.alarmCallback!=0 && mem.nowUsed+nByte>=mem.alarmThreshold ){ + sqlite3MemsysAlarm(nByte); + } + nByte = (nByte+3)&~3; + totalSize = nByte + sizeof(*pHdr) + sizeof(int) + + mem.nBacktrace*sizeof(void*) + mem.nTitle; + if( mem.iFail>0 ){ + if( mem.iFail==1 ){ + p = 0; + mem.iFail = mem.iReset; + if( mem.iFailCnt==0 ){ + sqlite3MemsysFailed(); /* A place to set a breakpoint */ + } + mem.iFailCnt++; + if( mem.iNextIsBenign || mem.iIsBenign ){ + mem.iBenignFailCnt++; + } + }else{ + p = malloc(totalSize); + mem.iFail--; + } + }else{ + p = malloc(totalSize); + if( p==0 ){ + sqlite3MemsysAlarm(nByte); + p = malloc(totalSize); + } + } + if( p ){ + z = p; + pBt = (void**)&z[mem.nTitle]; + pHdr = (struct MemBlockHdr*)&pBt[mem.nBacktrace]; + pHdr->pNext = 0; + pHdr->pPrev = mem.pLast; + if( mem.pLast ){ + mem.pLast->pNext = pHdr; + }else{ + mem.pFirst = pHdr; + } + mem.pLast = pHdr; + pHdr->iForeGuard = FOREGUARD; + pHdr->nBacktraceSlots = mem.nBacktrace; + pHdr->nTitle = mem.nTitle; + if( mem.nBacktrace ){ + void *aAddr[40]; + pHdr->nBacktrace = backtrace(aAddr, mem.nBacktrace+1)-1; + memcpy(pBt, &aAddr[1], pHdr->nBacktrace*sizeof(void*)); + }else{ + pHdr->nBacktrace = 0; + } + if( mem.nTitle ){ + memcpy(z, mem.zTitle, mem.nTitle); + } + pHdr->iSize = nByte; + pInt = (int*)&pHdr[1]; + pInt[nByte/sizeof(int)] = REARGUARD; + memset(pInt, 0x65, nByte); + mem.nowUsed += nByte; + if( mem.nowUsed>mem.mxUsed ){ + mem.mxUsed = mem.nowUsed; + } + p = (void*)pInt; + } + sqlite3_mutex_leave(mem.mutex); + } + mem.iNextIsBenign = 0; + return p; +} + +/* +** Free memory. +*/ +void sqlite3_free(void *pPrior){ + struct MemBlockHdr *pHdr; + void **pBt; + char *z; + if( pPrior==0 ){ + return; + } + assert( mem.mutex!=0 ); + pHdr = sqlite3MemsysGetHeader(pPrior); + pBt = (void**)pHdr; + pBt -= pHdr->nBacktraceSlots; + sqlite3_mutex_enter(mem.mutex); + mem.nowUsed -= pHdr->iSize; + if( pHdr->pPrev ){ + assert( pHdr->pPrev->pNext==pHdr ); + pHdr->pPrev->pNext = pHdr->pNext; + }else{ + assert( mem.pFirst==pHdr ); + mem.pFirst = pHdr->pNext; + } + if( pHdr->pNext ){ + assert( pHdr->pNext->pPrev==pHdr ); + pHdr->pNext->pPrev = pHdr->pPrev; + }else{ + assert( mem.pLast==pHdr ); + mem.pLast = pHdr->pPrev; + } + z = (char*)pBt; + z -= pHdr->nTitle; + memset(z, 0x2b, sizeof(void*)*pHdr->nBacktraceSlots + sizeof(*pHdr) + + pHdr->iSize + sizeof(int) + pHdr->nTitle); + free(z); + sqlite3_mutex_leave(mem.mutex); +} + +/* +** Change the size of an existing memory allocation. +** +** For this debugging implementation, we *always* make a copy of the +** allocation into a new place in memory. In this way, if the +** higher level code is using pointer to the old allocation, it is +** much more likely to break and we are much more liking to find +** the error. +*/ +void *sqlite3_realloc(void *pPrior, int nByte){ + struct MemBlockHdr *pOldHdr; + void *pNew; + if( pPrior==0 ){ + return sqlite3_malloc(nByte); + } + if( nByte<=0 ){ + sqlite3_free(pPrior); + return 0; + } + assert( mem.disallow==0 ); + pOldHdr = sqlite3MemsysGetHeader(pPrior); + pNew = sqlite3_malloc(nByte); + if( pNew ){ + memcpy(pNew, pPrior, nByteiSize ? 
nByte : pOldHdr->iSize); + if( nByte>pOldHdr->iSize ){ + memset(&((char*)pNew)[pOldHdr->iSize], 0x2b, nByte - pOldHdr->iSize); + } + sqlite3_free(pPrior); + } + return pNew; +} + +/* +** Set the number of backtrace levels kept for each allocation. +** A value of zero turns of backtracing. The number is always rounded +** up to a multiple of 2. +*/ +void sqlite3_memdebug_backtrace(int depth){ + if( depth<0 ){ depth = 0; } + if( depth>20 ){ depth = 20; } + depth = (depth+1)&0xfe; + mem.nBacktrace = depth; +} + +/* +** Set the title string for subsequent allocations. +*/ +void sqlite3_memdebug_settitle(const char *zTitle){ + int n = strlen(zTitle) + 1; + enterMem(); + if( n>=sizeof(mem.zTitle) ) n = sizeof(mem.zTitle)-1; + memcpy(mem.zTitle, zTitle, n); + mem.zTitle[n] = 0; + mem.nTitle = (n+3)&~3; + sqlite3_mutex_leave(mem.mutex); +} + +/* +** Open the file indicated and write a log of all unfreed memory +** allocations into that log. +*/ +void sqlite3_memdebug_dump(const char *zFilename){ + FILE *out; + struct MemBlockHdr *pHdr; + void **pBt; + out = fopen(zFilename, "w"); + if( out==0 ){ + fprintf(stderr, "** Unable to output memory debug output log: %s **\n", + zFilename); + return; + } + for(pHdr=mem.pFirst; pHdr; pHdr=pHdr->pNext){ + char *z = (char*)pHdr; + z -= pHdr->nBacktraceSlots*sizeof(void*) + pHdr->nTitle; + fprintf(out, "**** %d bytes at %p from %s ****\n", + pHdr->iSize, &pHdr[1], pHdr->nTitle ? z : "???"); + if( pHdr->nBacktrace ){ + fflush(out); + pBt = (void**)pHdr; + pBt -= pHdr->nBacktraceSlots; + backtrace_symbols_fd(pBt, pHdr->nBacktrace, fileno(out)); + fprintf(out, "\n"); + } + } + fclose(out); +} + +/* +** This routine is used to simulate malloc failures. +** +** After calling this routine, there will be iFail successful +** memory allocations and then a failure. If iRepeat is 1 +** all subsequent memory allocations will fail. If iRepeat is +** 0, only a single allocation will fail. If iRepeat is negative +** then the previous setting for iRepeat is unchanged. +** +** Each call to this routine overrides the previous. To disable +** the simulated allocation failure mechanism, set iFail to -1. +** +** This routine returns the number of simulated failures that have +** occurred since the previous call. +*/ +int sqlite3_memdebug_fail(int iFail, int iRepeat, int *piBenign){ + int n = mem.iFailCnt; + if( piBenign ){ + *piBenign = mem.iBenignFailCnt; + } + mem.iFail = iFail+1; + if( iRepeat>=0 ){ + mem.iReset = iRepeat; + } + mem.iFailCnt = 0; + mem.iBenignFailCnt = 0; + return n; +} + +int sqlite3_memdebug_pending(){ + return (mem.iFail-1); +} + +/* +** The following three functions are used to indicate to the test +** infrastructure which malloc() calls may fail benignly without +** affecting functionality. This can happen when resizing hash tables +** (failing to resize a hash-table is a performance hit, but not an +** error) or sometimes during a rollback operation. +** +** If the argument is true, sqlite3MallocBenignFailure() indicates that the +** next call to allocate memory may fail benignly. +** +** If sqlite3MallocEnterBenignBlock() is called with a non-zero argument, +** then all memory allocations requested before the next call to +** sqlite3MallocLeaveBenignBlock() may fail benignly. 
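
A hedged sketch of how a test harness might drive the failure-simulation hook described above (only meaningful in a build compiled with -DSQLITE_MEMDEBUG; the prototype is restated here for the sketch, and test_simulated_oom is an illustrative name, not part of SQLite):

#include "sqlite3.h"    /* sqlite3_malloc() / sqlite3_free() */

/* Internal test hook from this file, restated for the sketch. */
extern int sqlite3_memdebug_fail(int iFail, int iRepeat, int *piBenign);

static int test_simulated_oom(void){
  void *a, *b, *c;
  int nBenign = 0, nFail;

  sqlite3_memdebug_fail(2, 1, 0);     /* two successes, then fail repeatedly */
  a = sqlite3_malloc(64);             /* succeeds */
  b = sqlite3_malloc(64);             /* succeeds */
  c = sqlite3_malloc(64);             /* simulated failure: returns NULL */
  sqlite3_free(a);
  sqlite3_free(b);
  sqlite3_free(c);                    /* freeing NULL is a no-op */

  nFail = sqlite3_memdebug_fail(-1, 0, &nBenign);  /* disable simulation */
  return nFail;                       /* simulated failures seen above: 1 */
}
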
+*/ +void sqlite3MallocBenignFailure(int isBenign){ + if( isBenign ){ + mem.iNextIsBenign = 1; + } +} +void sqlite3MallocEnterBenignBlock(int isBenign){ + if( isBenign ){ + mem.iIsBenign = 1; + } +} +void sqlite3MallocLeaveBenignBlock(){ + mem.iIsBenign = 0; +} + +/* +** The following two routines are used to assert that no memory +** allocations occur between one call and the next. The use of +** these routines does not change the computed results in any way. +** These routines are like asserts. +*/ +void sqlite3MallocDisallow(void){ + assert( mem.mutex!=0 ); + sqlite3_mutex_enter(mem.mutex); + mem.disallow++; + sqlite3_mutex_leave(mem.mutex); +} +void sqlite3MallocAllow(void){ + assert( mem.mutex ); + sqlite3_mutex_enter(mem.mutex); + assert( mem.disallow>0 ); + mem.disallow--; + sqlite3_mutex_leave(mem.mutex); +} + +#endif /* SQLITE_MEMDEBUG && !SQLITE_OMIT_MEMORY_ALLOCATION */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/mutex.c b/libraries/sqlite/unix/sqlite-3.5.1/src/mutex.c new file mode 100644 index 0000000000..5815ff2169 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/mutex.c @@ -0,0 +1,126 @@ +/* +** 2007 August 14 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the C functions that implement mutexes. +** +** The implementation in this file does not provide any mutual +** exclusion and is thus suitable for use only in applications +** that use SQLite in a single thread. But this implementation +** does do a lot of error checking on mutexes to make sure they +** are called correctly and at appropriate times. Hence, this +** implementation is suitable for testing. +** debugging purposes +** +** $Id: mutex.c,v 1.16 2007/09/10 16:13:00 danielk1977 Exp $ +*/ +#include "sqliteInt.h" + +#ifdef SQLITE_MUTEX_NOOP_DEBUG +/* +** In this implementation, mutexes do not provide any mutual exclusion. +** But the error checking is provided. This implementation is useful +** for test purposes. +*/ + +/* +** The mutex object +*/ +struct sqlite3_mutex { + int id; /* The mutex type */ + int cnt; /* Number of entries without a matching leave */ +}; + +/* +** The sqlite3_mutex_alloc() routine allocates a new +** mutex and returns a pointer to it. If it returns NULL +** that means that a mutex could not be allocated. +*/ +sqlite3_mutex *sqlite3_mutex_alloc(int id){ + static sqlite3_mutex aStatic[5]; + sqlite3_mutex *pNew = 0; + switch( id ){ + case SQLITE_MUTEX_FAST: + case SQLITE_MUTEX_RECURSIVE: { + pNew = sqlite3_malloc(sizeof(*pNew)); + if( pNew ){ + pNew->id = id; + pNew->cnt = 0; + } + break; + } + default: { + assert( id-2 >= 0 ); + assert( id-2 < sizeof(aStatic)/sizeof(aStatic[0]) ); + pNew = &aStatic[id-2]; + pNew->id = id; + break; + } + } + return pNew; +} + +/* +** This routine deallocates a previously allocated mutex. +*/ +void sqlite3_mutex_free(sqlite3_mutex *p){ + assert( p ); + assert( p->cnt==0 ); + assert( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE ); + sqlite3_free(p); +} + +/* +** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt +** to enter a mutex. If another thread is already within the mutex, +** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return +** SQLITE_BUSY. 
The sqlite3_mutex_try() interface returns SQLITE_OK +** upon successful entry. Mutexes created using SQLITE_MUTEX_RECURSIVE can +** be entered multiple times by the same thread. In such cases the, +** mutex must be exited an equal number of times before another thread +** can enter. If the same thread tries to enter any other kind of mutex +** more than once, the behavior is undefined. +*/ +void sqlite3_mutex_enter(sqlite3_mutex *p){ + assert( p ); + assert( p->id==SQLITE_MUTEX_RECURSIVE || sqlite3_mutex_notheld(p) ); + p->cnt++; +} +int sqlite3_mutex_try(sqlite3_mutex *p){ + assert( p ); + assert( p->id==SQLITE_MUTEX_RECURSIVE || sqlite3_mutex_notheld(p) ); + p->cnt++; + return SQLITE_OK; +} + +/* +** The sqlite3_mutex_leave() routine exits a mutex that was +** previously entered by the same thread. The behavior +** is undefined if the mutex is not currently entered or +** is not currently allocated. SQLite will never do either. +*/ +void sqlite3_mutex_leave(sqlite3_mutex *p){ + assert( p ); + assert( sqlite3_mutex_held(p) ); + p->cnt--; + assert( p->id==SQLITE_MUTEX_RECURSIVE || sqlite3_mutex_notheld(p) ); +} + +/* +** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are +** intended for use inside assert() statements. +*/ +int sqlite3_mutex_held(sqlite3_mutex *p){ + return p==0 || p->cnt>0; +} +int sqlite3_mutex_notheld(sqlite3_mutex *p){ + return p==0 || p->cnt==0; +} +#endif /* SQLITE_MUTEX_NOOP_DEBUG */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/mutex.h b/libraries/sqlite/unix/sqlite-3.5.1/src/mutex.h new file mode 100644 index 0000000000..e7ec9d204f --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/mutex.h @@ -0,0 +1,82 @@ +/* +** 2007 August 28 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains the common header for all mutex implementations. +** The sqliteInt.h header #includes this file so that it is available +** to all source files. We break it out in an effort to keep the code +** better organized. +** +** NOTE: source files should *not* #include this header file directly. +** Source files should #include the sqliteInt.h file and let that file +** include this one indirectly. +** +** $Id: mutex.h,v 1.2 2007/08/30 14:10:30 drh Exp $ +*/ + + +#ifdef SQLITE_MUTEX_APPDEF +/* +** If SQLITE_MUTEX_APPDEF is defined, then this whole module is +** omitted and equivalent functionality must be provided by the +** application that links against the SQLite library. +*/ +#else +/* +** Figure out what version of the code to use. The choices are +** +** SQLITE_MUTEX_NOOP For single-threaded applications that +** do not desire error checking. +** +** SQLITE_MUTEX_NOOP_DEBUG For single-threaded applications with +** error checking to help verify that mutexes +** are being used correctly even though they +** are not needed. Used when SQLITE_DEBUG is +** defined on single-threaded builds. +** +** SQLITE_MUTEX_PTHREADS For multi-threaded applications on Unix. +** +** SQLITE_MUTEX_W32 For multi-threaded applications on Win32. +** +** SQLITE_MUTEX_OS2 For multi-threaded applications on OS/2. 
+*/ +#define SQLITE_MUTEX_NOOP 1 /* The default */ +#if defined(SQLITE_DEBUG) && !SQLITE_THREADSAFE +# undef SQLITE_MUTEX_NOOP +# define SQLITE_MUTEX_NOOP_DEBUG +#endif +#if defined(SQLITE_MUTEX_NOOP) && SQLITE_THREADSAFE && OS_UNIX +# undef SQLITE_MUTEX_NOOP +# define SQLITE_MUTEX_PTHREADS +#endif +#if defined(SQLITE_MUTEX_NOOP) && SQLITE_THREADSAFE && OS_WIN +# undef SQLITE_MUTEX_NOOP +# define SQLITE_MUTEX_W32 +#endif +#if defined(SQLITE_MUTEX_NOOP) && SQLITE_THREADSAFE && OS_OS2 +# undef SQLITE_MUTEX_NOOP +# define SQLITE_MUTEX_OS2 +#endif + +#ifdef SQLITE_MUTEX_NOOP +/* +** If this is a no-op implementation, implement everything as macros. +*/ +#define sqlite3_mutex_alloc(X) ((sqlite3_mutex*)8) +#define sqlite3_mutex_free(X) +#define sqlite3_mutex_enter(X) +#define sqlite3_mutex_try(X) SQLITE_OK +#define sqlite3_mutex_leave(X) +#define sqlite3_mutex_held(X) 1 +#define sqlite3_mutex_notheld(X) 1 +#endif + +#endif /* SQLITE_MUTEX_APPDEF */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/mutex_os2.c b/libraries/sqlite/unix/sqlite-3.5.1/src/mutex_os2.c new file mode 100644 index 0000000000..e0258c7563 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/mutex_os2.c @@ -0,0 +1,236 @@ +/* +** 2007 August 28 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the C functions that implement mutexes for OS/2 +** +** $Id: mutex_os2.c,v 1.3 2007/10/02 19:56:04 pweilbacher Exp $ +*/ +#include "sqliteInt.h" + +/* +** The code in this file is only used if SQLITE_MUTEX_OS2 is defined. +** See the mutex.h file for details. +*/ +#ifdef SQLITE_MUTEX_OS2 + +/********************** OS/2 Mutex Implementation ********************** +** +** This implementation of mutexes is built using the OS/2 API. +*/ + +/* +** The mutex object +** Each recursive mutex is an instance of the following structure. +*/ +struct sqlite3_mutex { + PSZ mutexName; /* Mutex name controlling the lock */ + HMTX mutex; /* Mutex controlling the lock */ + int id; /* Mutex type */ + int nRef; /* Number of references */ + TID owner; /* Thread holding this mutex */ +}; + +/* +** The sqlite3_mutex_alloc() routine allocates a new +** mutex and returns a pointer to it. If it returns NULL +** that means that a mutex could not be allocated. +** SQLite will unwind its stack and return an error. The argument +** to sqlite3_mutex_alloc() is one of these integer constants: +** +**
 <ul> +**
 <li>  SQLITE_MUTEX_FAST             0 +**
 <li>  SQLITE_MUTEX_RECURSIVE        1 +**
 <li>  SQLITE_MUTEX_STATIC_MASTER    2 +**
 <li>  SQLITE_MUTEX_STATIC_MEM       3 +**
 <li>  SQLITE_MUTEX_STATIC_PRNG      4 +**  </ul>
+** +** The first two constants cause sqlite3_mutex_alloc() to create +** a new mutex. The new mutex is recursive when SQLITE_MUTEX_RECURSIVE +** is used but not necessarily so when SQLITE_MUTEX_FAST is used. +** The mutex implementation does not need to make a distinction +** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does +** not want to. But SQLite will only request a recursive mutex in +** cases where it really needs one. If a faster non-recursive mutex +** implementation is available on the host platform, the mutex subsystem +** might return such a mutex in response to SQLITE_MUTEX_FAST. +** +** The other allowed parameters to sqlite3_mutex_alloc() each return +** a pointer to a static preexisting mutex. Three static mutexes are +** used by the current version of SQLite. Future versions of SQLite +** may add additional static mutexes. Static mutexes are for internal +** use by SQLite only. Applications that use SQLite mutexes should +** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or +** SQLITE_MUTEX_RECURSIVE. +** +** Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST +** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc() +** returns a different mutex on every call. But for the static +** mutex types, the same mutex is returned on every call that has +** the same type number. +*/ +sqlite3_mutex *sqlite3_mutex_alloc(int iType){ + PSZ mutex_name = "\\SEM32\\SQLITE\\MUTEX"; + int mutex_name_len = strlen(mutex_name) + 1; /* name length + null byte */ + sqlite3_mutex *p; + + switch( iType ){ + case SQLITE_MUTEX_FAST: + case SQLITE_MUTEX_RECURSIVE: { + p = sqlite3MallocZero( sizeof(*p) ); + if( p ){ + p->mutexName = (PSZ)malloc(mutex_name_len); + sqlite3_snprintf(mutex_name_len, p->mutexName, "%s", mutex_name); + p->id = iType; + DosCreateMutexSem(p->mutexName, &p->mutex, 0, FALSE); + DosOpenMutexSem(p->mutexName, &p->mutex); + } + break; + } + default: { + static sqlite3_mutex staticMutexes[5]; + static int isInit = 0; + while( !isInit ) { + static long lock = 0; + DosEnterCritSec(); + lock++; + if( lock == 1 ) { + DosExitCritSec(); + int i; + for(i = 0; i < sizeof(staticMutexes)/sizeof(staticMutexes[0]); i++) { + staticMutexes[i].mutexName = (PSZ)malloc(mutex_name_len + 1); + sqlite3_snprintf(mutex_name_len + 1, /* one more for the number */ + staticMutexes[i].mutexName, "%s%1d", mutex_name, i); + DosCreateMutexSem(staticMutexes[i].mutexName, + &staticMutexes[i].mutex, 0, FALSE); + DosOpenMutexSem(staticMutexes[i].mutexName, + &staticMutexes[i].mutex); + } + isInit = 1; + } else { + DosExitCritSec(); + DosSleep(1); + } + } + assert( iType-2 >= 0 ); + assert( iType-2 < sizeof(staticMutexes)/sizeof(staticMutexes[0]) ); + p = &staticMutexes[iType-2]; + p->id = iType; + break; + } + } + return p; +} + + +/* +** This routine deallocates a previously allocated mutex. +** SQLite is careful to deallocate every mutex that it allocates. +*/ +void sqlite3_mutex_free(sqlite3_mutex *p){ + assert( p ); + assert( p->nRef==0 ); + assert( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE ); + DosCloseMutexSem(p->mutex); + free(p->mutexName); + sqlite3_free(p); +} + +/* +** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt +** to enter a mutex. If another thread is already within the mutex, +** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return +** SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK +** upon successful entry. 
Mutexes created using SQLITE_MUTEX_RECURSIVE can +** be entered multiple times by the same thread. In such cases the, +** mutex must be exited an equal number of times before another thread +** can enter. If the same thread tries to enter any other kind of mutex +** more than once, the behavior is undefined. +*/ +void sqlite3_mutex_enter(sqlite3_mutex *p){ + TID tid; + PID holder1; + ULONG holder2; + assert( p ); + assert( p->id==SQLITE_MUTEX_RECURSIVE || sqlite3_mutex_notheld(p) ); + DosRequestMutexSem(p->mutex, SEM_INDEFINITE_WAIT); + DosQueryMutexSem(p->mutex, &holder1, &tid, &holder2); + p->owner = tid; + p->nRef++; +} +int sqlite3_mutex_try(sqlite3_mutex *p){ + int rc; + TID tid; + PID holder1; + ULONG holder2; + assert( p ); + assert( p->id==SQLITE_MUTEX_RECURSIVE || sqlite3_mutex_notheld(p) ); + if( DosRequestMutexSem(p->mutex, SEM_IMMEDIATE_RETURN) == NO_ERROR) { + DosQueryMutexSem(p->mutex, &holder1, &tid, &holder2); + p->owner = tid; + p->nRef++; + rc = SQLITE_OK; + } else { + rc = SQLITE_BUSY; + } + + return rc; +} + +/* +** The sqlite3_mutex_leave() routine exits a mutex that was +** previously entered by the same thread. The behavior +** is undefined if the mutex is not currently entered or +** is not currently allocated. SQLite will never do either. +*/ +void sqlite3_mutex_leave(sqlite3_mutex *p){ + TID tid; + PID holder1; + ULONG holder2; + assert( p->nRef>0 ); + DosQueryMutexSem(p->mutex, &holder1, &tid, &holder2); + assert( p->owner==tid ); + p->nRef--; + assert( p->nRef==0 || p->id==SQLITE_MUTEX_RECURSIVE ); + DosReleaseMutexSem(p->mutex); +} + +/* +** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are +** intended for use inside assert() statements. +*/ +int sqlite3_mutex_held(sqlite3_mutex *p){ + TID tid; + PID pid; + ULONG ulCount; + PTIB ptib; + if( p!=0 ) { + DosQueryMutexSem(p->mutex, &pid, &tid, &ulCount); + } else { + DosGetInfoBlocks(&ptib, NULL); + tid = ptib->tib_ptib2->tib2_ultid; + } + return p==0 || (p->nRef!=0 && p->owner==tid); +} +int sqlite3_mutex_notheld(sqlite3_mutex *p){ + TID tid; + PID pid; + ULONG ulCount; + PTIB ptib; + if( p!= 0 ) { + DosQueryMutexSem(p->mutex, &pid, &tid, &ulCount); + } else { + DosGetInfoBlocks(&ptib, NULL); + tid = ptib->tib_ptib2->tib2_ultid; + } + return p==0 || p->nRef==0 || p->owner!=tid; +} +#endif /* SQLITE_MUTEX_OS2 */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/mutex_unix.c b/libraries/sqlite/unix/sqlite-3.5.1/src/mutex_unix.c new file mode 100644 index 0000000000..ff088fb531 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/mutex_unix.c @@ -0,0 +1,223 @@ +/* +** 2007 August 28 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the C functions that implement mutexes for pthreads +** +** $Id: mutex_unix.c,v 1.2 2007/08/28 22:24:35 drh Exp $ +*/ +#include "sqliteInt.h" + +/* +** The code in this file is only used if we are compiling threadsafe +** under unix with pthreads. +** +** Note that this implementation requires a version of pthreads that +** supports recursive mutexes. +*/ +#ifdef SQLITE_MUTEX_PTHREADS + +#include + +/* +** Each recursive mutex is an instance of the following structure. 
+*/ +struct sqlite3_mutex { + pthread_mutex_t mutex; /* Mutex controlling the lock */ + int id; /* Mutex type */ + int nRef; /* Number of entrances */ + pthread_t owner; /* Thread that is within this mutex */ +#ifdef SQLITE_DEBUG + int trace; /* True to trace changes */ +#endif +}; + +/* +** The sqlite3_mutex_alloc() routine allocates a new +** mutex and returns a pointer to it. If it returns NULL +** that means that a mutex could not be allocated. SQLite +** will unwind its stack and return an error. The argument +** to sqlite3_mutex_alloc() is one of these integer constants: +** +**
 <ul> +**
 <li>  SQLITE_MUTEX_FAST +**
 <li>  SQLITE_MUTEX_RECURSIVE +**
 <li>  SQLITE_MUTEX_STATIC_MASTER +**
 <li>  SQLITE_MUTEX_STATIC_MEM +**
 <li>  SQLITE_MUTEX_STATIC_MEM2 +**
 <li>  SQLITE_MUTEX_STATIC_PRNG +**
 <li>  SQLITE_MUTEX_STATIC_LRU +**  </ul>
+** +** The first two constants cause sqlite3_mutex_alloc() to create +** a new mutex. The new mutex is recursive when SQLITE_MUTEX_RECURSIVE +** is used but not necessarily so when SQLITE_MUTEX_FAST is used. +** The mutex implementation does not need to make a distinction +** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does +** not want to. But SQLite will only request a recursive mutex in +** cases where it really needs one. If a faster non-recursive mutex +** implementation is available on the host platform, the mutex subsystem +** might return such a mutex in response to SQLITE_MUTEX_FAST. +** +** The other allowed parameters to sqlite3_mutex_alloc() each return +** a pointer to a static preexisting mutex. Three static mutexes are +** used by the current version of SQLite. Future versions of SQLite +** may add additional static mutexes. Static mutexes are for internal +** use by SQLite only. Applications that use SQLite mutexes should +** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or +** SQLITE_MUTEX_RECURSIVE. +** +** Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST +** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc() +** returns a different mutex on every call. But for the static +** mutex types, the same mutex is returned on every call that has +** the same type number. +*/ +sqlite3_mutex *sqlite3_mutex_alloc(int iType){ + static sqlite3_mutex staticMutexes[] = { + { PTHREAD_MUTEX_INITIALIZER, }, + { PTHREAD_MUTEX_INITIALIZER, }, + { PTHREAD_MUTEX_INITIALIZER, }, + { PTHREAD_MUTEX_INITIALIZER, }, + { PTHREAD_MUTEX_INITIALIZER, }, + }; + sqlite3_mutex *p; + switch( iType ){ + case SQLITE_MUTEX_RECURSIVE: { + p = sqlite3MallocZero( sizeof(*p) ); + if( p ){ + pthread_mutexattr_t recursiveAttr; + pthread_mutexattr_init(&recursiveAttr); + pthread_mutexattr_settype(&recursiveAttr, PTHREAD_MUTEX_RECURSIVE); + pthread_mutex_init(&p->mutex, &recursiveAttr); + pthread_mutexattr_destroy(&recursiveAttr); + p->id = iType; + } + break; + } + case SQLITE_MUTEX_FAST: { + p = sqlite3MallocZero( sizeof(*p) ); + if( p ){ + p->id = iType; + pthread_mutex_init(&p->mutex, 0); + } + break; + } + default: { + assert( iType-2 >= 0 ); + assert( iType-2 < sizeof(staticMutexes)/sizeof(staticMutexes[0]) ); + p = &staticMutexes[iType-2]; + p->id = iType; + break; + } + } + return p; +} + + +/* +** This routine deallocates a previously +** allocated mutex. SQLite is careful to deallocate every +** mutex that it allocates. +*/ +void sqlite3_mutex_free(sqlite3_mutex *p){ + assert( p ); + assert( p->nRef==0 ); + assert( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE ); + pthread_mutex_destroy(&p->mutex); + sqlite3_free(p); +} + +/* +** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt +** to enter a mutex. If another thread is already within the mutex, +** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return +** SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK +** upon successful entry. Mutexes created using SQLITE_MUTEX_RECURSIVE can +** be entered multiple times by the same thread. In such cases the, +** mutex must be exited an equal number of times before another thread +** can enter. If the same thread tries to enter any other kind of mutex +** more than once, the behavior is undefined. 
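
The enter/try/leave discipline spelled out above can be exercised through the public mutex interface; a minimal sketch (assuming a threadsafe build with assert() enabled in both the library and the application; main is just a test driver):

#include <assert.h>
#include "sqlite3.h"

int main(void){
  sqlite3_mutex *m = sqlite3_mutex_alloc(SQLITE_MUTEX_RECURSIVE);
  if( m==0 ) return 1;                /* allocation can fail */

  sqlite3_mutex_enter(m);
  assert( sqlite3_mutex_held(m) );    /* held()/notheld() are for asserts */

  /* A recursive mutex may be re-entered by the thread that already owns it;
  ** each successful entry must be balanced by a leave. */
  if( sqlite3_mutex_try(m)==SQLITE_OK ){
    sqlite3_mutex_leave(m);
  }

  sqlite3_mutex_leave(m);
  sqlite3_mutex_free(m);              /* only FAST/RECURSIVE mutexes are freed */
  return 0;
}
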
+*/ +void sqlite3_mutex_enter(sqlite3_mutex *p){ + assert( p ); + assert( p->id==SQLITE_MUTEX_RECURSIVE || sqlite3_mutex_notheld(p) ); + pthread_mutex_lock(&p->mutex); + p->owner = pthread_self(); + p->nRef++; +#ifdef SQLITE_DEBUG + if( p->trace ){ + printf("enter mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef); + } +#endif +} +int sqlite3_mutex_try(sqlite3_mutex *p){ + int rc; + assert( p ); + assert( p->id==SQLITE_MUTEX_RECURSIVE || sqlite3_mutex_notheld(p) ); + if( pthread_mutex_trylock(&p->mutex)==0 ){ + p->owner = pthread_self(); + p->nRef++; + rc = SQLITE_OK; +#ifdef SQLITE_DEBUG + if( p->trace ){ + printf("enter mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef); + } +#endif + }else{ + rc = SQLITE_BUSY; + } + return rc; +} + +/* +** The sqlite3_mutex_leave() routine exits a mutex that was +** previously entered by the same thread. The behavior +** is undefined if the mutex is not currently entered or +** is not currently allocated. SQLite will never do either. +*/ +void sqlite3_mutex_leave(sqlite3_mutex *p){ + assert( p ); + assert( sqlite3_mutex_held(p) ); + p->nRef--; + assert( p->nRef==0 || p->id==SQLITE_MUTEX_RECURSIVE ); +#ifdef SQLITE_DEBUG + if( p->trace ){ + printf("leave mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef); + } +#endif + pthread_mutex_unlock(&p->mutex); +} + +/* +** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are +** intended for use only inside assert() statements. On some platforms, +** there might be race conditions that can cause these routines to +** deliver incorrect results. In particular, if pthread_equal() is +** not an atomic operation, then these routines might delivery +** incorrect results. On most platforms, pthread_equal() is a +** comparison of two integers and is therefore atomic. But we are +** told that HPUX is not such a platform. If so, then these routines +** will not always work correctly on HPUX. +** +** On those platforms where pthread_equal() is not atomic, SQLite +** should be compiled without -DSQLITE_DEBUG and with -DNDEBUG to +** make sure no assert() statements are evaluated and hence these +** routines are never called. +*/ +#ifndef NDEBUG +int sqlite3_mutex_held(sqlite3_mutex *p){ + return p==0 || (p->nRef!=0 && pthread_equal(p->owner, pthread_self())); +} +int sqlite3_mutex_notheld(sqlite3_mutex *p){ + return p==0 || p->nRef==0 || pthread_equal(p->owner, pthread_self())==0; +} +#endif +#endif /* SQLITE_MUTEX_PTHREAD */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/mutex_w32.c b/libraries/sqlite/unix/sqlite-3.5.1/src/mutex_w32.c new file mode 100644 index 0000000000..ee5a4088dd --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/mutex_w32.c @@ -0,0 +1,208 @@ +/* +** 2007 August 14 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the C functions that implement mutexes for win32 +** +** $Id: mutex_w32.c,v 1.4 2007/09/05 14:30:42 drh Exp $ +*/ +#include "sqliteInt.h" + +/* +** The code in this file is only used if we are compiling multithreaded +** on a win32 system. +*/ +#ifdef SQLITE_MUTEX_W32 + +/* +** Each recursive mutex is an instance of the following structure. 
+*/ +struct sqlite3_mutex { + CRITICAL_SECTION mutex; /* Mutex controlling the lock */ + int id; /* Mutex type */ + int nRef; /* Number of enterances */ + DWORD owner; /* Thread holding this mutex */ +}; + +/* +** Return true (non-zero) if we are running under WinNT, Win2K, WinXP, +** or WinCE. Return false (zero) for Win95, Win98, or WinME. +** +** Here is an interesting observation: Win95, Win98, and WinME lack +** the LockFileEx() API. But we can still statically link against that +** API as long as we don't call it win running Win95/98/ME. A call to +** this routine is used to determine if the host is Win95/98/ME or +** WinNT/2K/XP so that we will know whether or not we can safely call +** the LockFileEx() API. +*/ +#if OS_WINCE +# define mutexIsNT() (1) +#else + static int mutexIsNT(void){ + static int osType = 0; + if( osType==0 ){ + OSVERSIONINFO sInfo; + sInfo.dwOSVersionInfoSize = sizeof(sInfo); + GetVersionEx(&sInfo); + osType = sInfo.dwPlatformId==VER_PLATFORM_WIN32_NT ? 2 : 1; + } + return osType==2; + } +#endif /* OS_WINCE */ + + +/* +** The sqlite3_mutex_alloc() routine allocates a new +** mutex and returns a pointer to it. If it returns NULL +** that means that a mutex could not be allocated. SQLite +** will unwind its stack and return an error. The argument +** to sqlite3_mutex_alloc() is one of these integer constants: +** +**
 <ul> +**
 <li>  SQLITE_MUTEX_FAST             0 +**
 <li>  SQLITE_MUTEX_RECURSIVE        1 +**
 <li>  SQLITE_MUTEX_STATIC_MASTER    2 +**
 <li>  SQLITE_MUTEX_STATIC_MEM       3 +**
 <li>  SQLITE_MUTEX_STATIC_PRNG      4 +**  </ul>
+** +** The first two constants cause sqlite3_mutex_alloc() to create +** a new mutex. The new mutex is recursive when SQLITE_MUTEX_RECURSIVE +** is used but not necessarily so when SQLITE_MUTEX_FAST is used. +** The mutex implementation does not need to make a distinction +** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does +** not want to. But SQLite will only request a recursive mutex in +** cases where it really needs one. If a faster non-recursive mutex +** implementation is available on the host platform, the mutex subsystem +** might return such a mutex in response to SQLITE_MUTEX_FAST. +** +** The other allowed parameters to sqlite3_mutex_alloc() each return +** a pointer to a static preexisting mutex. Three static mutexes are +** used by the current version of SQLite. Future versions of SQLite +** may add additional static mutexes. Static mutexes are for internal +** use by SQLite only. Applications that use SQLite mutexes should +** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or +** SQLITE_MUTEX_RECURSIVE. +** +** Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST +** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc() +** returns a different mutex on every call. But for the static +** mutex types, the same mutex is returned on every call that has +** the same type number. +*/ +sqlite3_mutex *sqlite3_mutex_alloc(int iType){ + sqlite3_mutex *p; + + switch( iType ){ + case SQLITE_MUTEX_FAST: + case SQLITE_MUTEX_RECURSIVE: { + p = sqlite3MallocZero( sizeof(*p) ); + if( p ){ + p->id = iType; + InitializeCriticalSection(&p->mutex); + } + break; + } + default: { + static sqlite3_mutex staticMutexes[5]; + static int isInit = 0; + while( !isInit ){ + static long lock = 0; + if( InterlockedIncrement(&lock)==1 ){ + int i; + for(i=0; i= 0 ); + assert( iType-2 < sizeof(staticMutexes)/sizeof(staticMutexes[0]) ); + p = &staticMutexes[iType-2]; + p->id = iType; + break; + } + } + return p; +} + + +/* +** This routine deallocates a previously +** allocated mutex. SQLite is careful to deallocate every +** mutex that it allocates. +*/ +void sqlite3_mutex_free(sqlite3_mutex *p){ + assert( p ); + assert( p->nRef==0 ); + assert( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE ); + DeleteCriticalSection(&p->mutex); + sqlite3_free(p); +} + +/* +** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt +** to enter a mutex. If another thread is already within the mutex, +** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return +** SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK +** upon successful entry. Mutexes created using SQLITE_MUTEX_RECURSIVE can +** be entered multiple times by the same thread. In such cases the, +** mutex must be exited an equal number of times before another thread +** can enter. If the same thread tries to enter any other kind of mutex +** more than once, the behavior is undefined. 
+*/ +void sqlite3_mutex_enter(sqlite3_mutex *p){ + assert( p ); + assert( p->id==SQLITE_MUTEX_RECURSIVE || sqlite3_mutex_notheld(p) ); + EnterCriticalSection(&p->mutex); + p->owner = GetCurrentThreadId(); + p->nRef++; +} +int sqlite3_mutex_try(sqlite3_mutex *p){ + int rc; + assert( p ); + assert( p->id==SQLITE_MUTEX_RECURSIVE || sqlite3_mutex_notheld(p) ); + if( mutexIsNT() && TryEnterCriticalSection(&p->mutex) ){ + p->owner = GetCurrentThreadId(); + p->nRef++; + rc = SQLITE_OK; + }else{ + rc = SQLITE_BUSY; + } + return rc; +} + +/* +** The sqlite3_mutex_leave() routine exits a mutex that was +** previously entered by the same thread. The behavior +** is undefined if the mutex is not currently entered or +** is not currently allocated. SQLite will never do either. +*/ +void sqlite3_mutex_leave(sqlite3_mutex *p){ + assert( p->nRef>0 ); + assert( p->owner==GetCurrentThreadId() ); + p->nRef--; + assert( p->nRef==0 || p->id==SQLITE_MUTEX_RECURSIVE ); + LeaveCriticalSection(&p->mutex); +} + +/* +** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are +** intended for use only inside assert() statements. +*/ +int sqlite3_mutex_held(sqlite3_mutex *p){ + return p==0 || (p->nRef!=0 && p->owner==GetCurrentThreadId()); +} +int sqlite3_mutex_notheld(sqlite3_mutex *p){ + return p==0 || p->nRef==0 || p->owner!=GetCurrentThreadId(); +} +#endif /* SQLITE_MUTEX_W32 */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/os.c b/libraries/sqlite/unix/sqlite-3.5.1/src/os.c new file mode 100644 index 0000000000..3b6ca7bcaf --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/os.c @@ -0,0 +1,282 @@ + /* +** 2005 November 29 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains OS interface code that is common to all +** architectures. +*/ +#define _SQLITE_OS_C_ 1 +#include "sqliteInt.h" +#undef _SQLITE_OS_C_ + +/* +** The default SQLite sqlite3_vfs implementations do not allocate +** memory (actually, os_unix.c allocates a small amount of memory +** from within OsOpen()), but some third-party implementations may. +** So we test the effects of a malloc() failing and the sqlite3OsXXX() +** function returning SQLITE_IOERR_NOMEM using the DO_OS_MALLOC_TEST macro. +** +** The following functions are instrumented for malloc() failure +** testing: +** +** sqlite3OsOpen() +** sqlite3OsRead() +** sqlite3OsWrite() +** sqlite3OsSync() +** sqlite3OsLock() +** +*/ +#ifdef SQLITE_TEST + #define DO_OS_MALLOC_TEST if (1) { \ + void *pTstAlloc = sqlite3_malloc(10); \ + if (!pTstAlloc) return SQLITE_IOERR_NOMEM; \ + sqlite3_free(pTstAlloc); \ + } +#else + #define DO_OS_MALLOC_TEST +#endif + +/* +** The following routines are convenience wrappers around methods +** of the sqlite3_file object. This is mostly just syntactic sugar. All +** of this would be completely automatic if SQLite were coded using +** C++ instead of plain old C. 
+*/ +int sqlite3OsClose(sqlite3_file *pId){ + int rc = SQLITE_OK; + if( pId->pMethods ){ + rc = pId->pMethods->xClose(pId); + pId->pMethods = 0; + } + return rc; +} +int sqlite3OsRead(sqlite3_file *id, void *pBuf, int amt, i64 offset){ + DO_OS_MALLOC_TEST; + return id->pMethods->xRead(id, pBuf, amt, offset); +} +int sqlite3OsWrite(sqlite3_file *id, const void *pBuf, int amt, i64 offset){ + DO_OS_MALLOC_TEST; + return id->pMethods->xWrite(id, pBuf, amt, offset); +} +int sqlite3OsTruncate(sqlite3_file *id, i64 size){ + return id->pMethods->xTruncate(id, size); +} +int sqlite3OsSync(sqlite3_file *id, int flags){ + DO_OS_MALLOC_TEST; + return id->pMethods->xSync(id, flags); +} +int sqlite3OsFileSize(sqlite3_file *id, i64 *pSize){ + return id->pMethods->xFileSize(id, pSize); +} +int sqlite3OsLock(sqlite3_file *id, int lockType){ + DO_OS_MALLOC_TEST; + return id->pMethods->xLock(id, lockType); +} +int sqlite3OsUnlock(sqlite3_file *id, int lockType){ + return id->pMethods->xUnlock(id, lockType); +} +int sqlite3OsCheckReservedLock(sqlite3_file *id){ + return id->pMethods->xCheckReservedLock(id); +} +int sqlite3OsFileControl(sqlite3_file *id, int op, void *pArg){ + return id->pMethods->xFileControl(id,op,pArg); +} + +#ifdef SQLITE_TEST + /* The following two variables are used to override the values returned + ** by the xSectorSize() and xDeviceCharacteristics() vfs methods for + ** testing purposes. They are usually set by a test command implemented + ** in test6.c. + */ + int sqlite3_test_sector_size = 0; + int sqlite3_test_device_characteristics = 0; + int sqlite3OsDeviceCharacteristics(sqlite3_file *id){ + int dc = id->pMethods->xDeviceCharacteristics(id); + return dc | sqlite3_test_device_characteristics; + } + int sqlite3OsSectorSize(sqlite3_file *id){ + if( sqlite3_test_sector_size==0 ){ + int (*xSectorSize)(sqlite3_file*) = id->pMethods->xSectorSize; + return (xSectorSize ? xSectorSize(id) : SQLITE_DEFAULT_SECTOR_SIZE); + } + return sqlite3_test_sector_size; + } +#else + int sqlite3OsSectorSize(sqlite3_file *id){ + int (*xSectorSize)(sqlite3_file*) = id->pMethods->xSectorSize; + return (xSectorSize ? xSectorSize(id) : SQLITE_DEFAULT_SECTOR_SIZE); + } + int sqlite3OsDeviceCharacteristics(sqlite3_file *id){ + return id->pMethods->xDeviceCharacteristics(id); + } +#endif + +/* +** The next group of routines are convenience wrappers around the +** VFS methods. 
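
As a sketch of how the wrappers above are typically combined (appendAndSync is an illustrative name, error handling is simplified, and the i64 typedef plus these helpers come from the internal headers, so this only compiles inside the SQLite tree), a routine that appends a buffer to an already-open sqlite3_file and flushes it might look like:

#include "sqliteInt.h"

/* Append nBuf bytes to the end of an open file and flush it to disk.
** Returns an SQLITE_* error code. */
static int appendAndSync(sqlite3_file *pFile, const void *pBuf, int nBuf){
  i64 szFile;
  int rc = sqlite3OsFileSize(pFile, &szFile);     /* current end of file */
  if( rc==SQLITE_OK ){
    rc = sqlite3OsWrite(pFile, pBuf, nBuf, szFile);
  }
  if( rc==SQLITE_OK ){
    rc = sqlite3OsSync(pFile, SQLITE_SYNC_NORMAL);
  }
  return rc;
}
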
+*/ +int sqlite3OsOpen( + sqlite3_vfs *pVfs, + const char *zPath, + sqlite3_file *pFile, + int flags, + int *pFlagsOut +){ + DO_OS_MALLOC_TEST; + return pVfs->xOpen(pVfs, zPath, pFile, flags, pFlagsOut); +} +int sqlite3OsDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){ + return pVfs->xDelete(pVfs, zPath, dirSync); +} +int sqlite3OsAccess(sqlite3_vfs *pVfs, const char *zPath, int flags){ + return pVfs->xAccess(pVfs, zPath, flags); +} +int sqlite3OsGetTempname(sqlite3_vfs *pVfs, int nBufOut, char *zBufOut){ + return pVfs->xGetTempname(pVfs, nBufOut, zBufOut); +} +int sqlite3OsFullPathname( + sqlite3_vfs *pVfs, + const char *zPath, + int nPathOut, + char *zPathOut +){ + return pVfs->xFullPathname(pVfs, zPath, nPathOut, zPathOut); +} +void *sqlite3OsDlOpen(sqlite3_vfs *pVfs, const char *zPath){ + return pVfs->xDlOpen(pVfs, zPath); +} +void sqlite3OsDlError(sqlite3_vfs *pVfs, int nByte, char *zBufOut){ + pVfs->xDlError(pVfs, nByte, zBufOut); +} +void *sqlite3OsDlSym(sqlite3_vfs *pVfs, void *pHandle, const char *zSymbol){ + return pVfs->xDlSym(pVfs, pHandle, zSymbol); +} +void sqlite3OsDlClose(sqlite3_vfs *pVfs, void *pHandle){ + pVfs->xDlClose(pVfs, pHandle); +} +int sqlite3OsRandomness(sqlite3_vfs *pVfs, int nByte, char *zBufOut){ + return pVfs->xRandomness(pVfs, nByte, zBufOut); +} +int sqlite3OsSleep(sqlite3_vfs *pVfs, int nMicro){ + return pVfs->xSleep(pVfs, nMicro); +} +int sqlite3OsCurrentTime(sqlite3_vfs *pVfs, double *pTimeOut){ + return pVfs->xCurrentTime(pVfs, pTimeOut); +} + +int sqlite3OsOpenMalloc( + sqlite3_vfs *pVfs, + const char *zFile, + sqlite3_file **ppFile, + int flags, + int *pOutFlags +){ + int rc = SQLITE_NOMEM; + sqlite3_file *pFile; + pFile = (sqlite3_file *)sqlite3_malloc(pVfs->szOsFile); + if( pFile ){ + rc = sqlite3OsOpen(pVfs, zFile, pFile, flags, pOutFlags); + if( rc!=SQLITE_OK ){ + sqlite3_free(pFile); + }else{ + *ppFile = pFile; + } + } + return rc; +} +int sqlite3OsCloseFree(sqlite3_file *pFile){ + int rc = SQLITE_OK; + if( pFile ){ + rc = sqlite3OsClose(pFile); + sqlite3_free(pFile); + } + return rc; +} + +/* +** The list of all registered VFS implementations. This list is +** initialized to the single VFS returned by sqlite3OsDefaultVfs() +** upon the first call to sqlite3_vfs_find(). +*/ +static sqlite3_vfs *vfsList = 0; + +/* +** Locate a VFS by name. If no name is given, simply return the +** first VFS on the list. +*/ +sqlite3_vfs *sqlite3_vfs_find(const char *zVfs){ + sqlite3_mutex *mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER); + sqlite3_vfs *pVfs; + static int isInit = 0; + sqlite3_mutex_enter(mutex); + if( !isInit ){ + vfsList = sqlite3OsDefaultVfs(); + isInit = 1; + } + for(pVfs = vfsList; pVfs; pVfs=pVfs->pNext){ + if( zVfs==0 ) break; + if( strcmp(zVfs, pVfs->zName)==0 ) break; + } + sqlite3_mutex_leave(mutex); + return pVfs; +} + +/* +** Unlink a VFS from the linked list +*/ +static void vfsUnlink(sqlite3_vfs *pVfs){ + assert( sqlite3_mutex_held(sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER)) ); + if( vfsList==pVfs ){ + vfsList = pVfs->pNext; + }else{ + sqlite3_vfs *p = vfsList; + while( p->pNext && p->pNext!=pVfs ){ + p = p->pNext; + } + if( p->pNext==pVfs ){ + p->pNext = pVfs->pNext; + } + } +} + +/* +** Register a VFS with the system. It is harmless to register the same +** VFS multiple times. The new VFS becomes the default if makeDflt is +** true. 
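
Putting the VFS-level helpers together, a sketch (again internal-only; touchThroughVfs and the flag combination are illustrative) that locates the default VFS, opens a file through it, and closes it again:

#include "sqliteInt.h"

static int touchThroughVfs(const char *zPath){
  sqlite3_vfs *pVfs = sqlite3_vfs_find(0);        /* default (first) VFS */
  sqlite3_file *pFile = 0;
  int outFlags = 0;
  int rc;

  if( pVfs==0 ) return SQLITE_ERROR;
  rc = sqlite3OsOpenMalloc(pVfs, zPath, &pFile,
           SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_MAIN_DB,
           &outFlags);
  if( rc==SQLITE_OK ){
    rc = sqlite3OsCloseFree(pFile);               /* close and free handle */
  }
  return rc;
}

Re-registering pVfs afterwards with makeDflt set would be harmless, per the comment above.
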
+*/ +int sqlite3_vfs_register(sqlite3_vfs *pVfs, int makeDflt){ + sqlite3_mutex *mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER); + sqlite3_vfs_find(0); /* Make sure we are initialized */ + sqlite3_mutex_enter(mutex); + vfsUnlink(pVfs); + if( makeDflt || vfsList==0 ){ + pVfs->pNext = vfsList; + vfsList = pVfs; + }else{ + pVfs->pNext = vfsList->pNext; + vfsList->pNext = pVfs; + } + assert(vfsList); + sqlite3_mutex_leave(mutex); + return SQLITE_OK; +} + +/* +** Unregister a VFS so that it is no longer accessible. +*/ +int sqlite3_vfs_unregister(sqlite3_vfs *pVfs){ + sqlite3_mutex *mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER); + sqlite3_mutex_enter(mutex); + vfsUnlink(pVfs); + assert(vfsList); + sqlite3_mutex_leave(mutex); + return SQLITE_OK; +} diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/os.h b/libraries/sqlite/unix/sqlite-3.5.1/src/os.h new file mode 100644 index 0000000000..554952df00 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/os.h @@ -0,0 +1,284 @@ +/* +** 2001 September 16 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This header file (together with is companion C source-code file +** "os.c") attempt to abstract the underlying operating system so that +** the SQLite library will work on both POSIX and windows systems. +** +** This header file is #include-ed by sqliteInt.h and thus ends up +** being included by every source file. +*/ +#ifndef _SQLITE_OS_H_ +#define _SQLITE_OS_H_ + +/* +** Figure out if we are dealing with Unix, Windows, or some other +** operating system. After the following block of preprocess macros, +** all of OS_UNIX, OS_WIN, OS_OS2, and OS_OTHER will defined to either +** 1 or 0. One of the four will be 1. The other three will be 0. 
+*/ +#if defined(OS_OTHER) +# if OS_OTHER==1 +# undef OS_UNIX +# define OS_UNIX 0 +# undef OS_WIN +# define OS_WIN 0 +# undef OS_OS2 +# define OS_OS2 0 +# else +# undef OS_OTHER +# endif +#endif +#if !defined(OS_UNIX) && !defined(OS_OTHER) +# define OS_OTHER 0 +# ifndef OS_WIN +# if defined(_WIN32) || defined(WIN32) || defined(__CYGWIN__) || defined(__MINGW32__) || defined(__BORLANDC__) +# define OS_WIN 1 +# define OS_UNIX 0 +# define OS_OS2 0 +# elif defined(__EMX__) || defined(_OS2) || defined(OS2) || defined(_OS2_) || defined(__OS2__) +# define OS_WIN 0 +# define OS_UNIX 0 +# define OS_OS2 1 +# else +# define OS_WIN 0 +# define OS_UNIX 1 +# define OS_OS2 0 +# endif +# else +# define OS_UNIX 0 +# define OS_OS2 0 +# endif +#else +# ifndef OS_WIN +# define OS_WIN 0 +# endif +#endif + + + +/* +** Define the maximum size of a temporary filename +*/ +#if OS_WIN +# include +# define SQLITE_TEMPNAME_SIZE (MAX_PATH+50) +#elif OS_OS2 +# if (__GNUC__ > 3 || __GNUC__ == 3 && __GNUC_MINOR__ >= 3) && defined(OS2_HIGH_MEMORY) +# include /* has to be included before os2.h for linking to work */ +# endif +# define INCL_DOSDATETIME +# define INCL_DOSFILEMGR +# define INCL_DOSERRORS +# define INCL_DOSMISC +# define INCL_DOSPROCESS +# define INCL_DOSMODULEMGR +# include +# define SQLITE_TEMPNAME_SIZE (CCHMAXPATHCOMP) +#else +# define SQLITE_TEMPNAME_SIZE 200 +#endif + +/* If the SET_FULLSYNC macro is not defined above, then make it +** a no-op +*/ +#ifndef SET_FULLSYNC +# define SET_FULLSYNC(x,y) +#endif + +/* +** The default size of a disk sector +*/ +#ifndef SQLITE_DEFAULT_SECTOR_SIZE +# define SQLITE_DEFAULT_SECTOR_SIZE 512 +#endif + +/* +** Temporary files are named starting with this prefix followed by 16 random +** alphanumeric characters, and no file extension. They are stored in the +** OS's standard temporary file directory, and are deleted prior to exit. +** If sqlite is being embedded in another program, you may wish to change the +** prefix to reflect your program's name, so that if your program exits +** prematurely, old temporary files can be easily identified. This can be done +** using -DSQLITE_TEMP_FILE_PREFIX=myprefix_ on the compiler command line. +** +** 2006-10-31: The default prefix used to be "sqlite_". But then +** Mcafee started using SQLite in their anti-virus product and it +** started putting files with the "sqlite" name in the c:/temp folder. +** This annoyed many windows users. Those users would then do a +** Google search for "sqlite", find the telephone numbers of the +** developers and call to wake them up at night and complain. +** For this reason, the default name prefix is changed to be "sqlite" +** spelled backwards. So the temp files are still identified, but +** anybody smart enough to figure out the code is also likely smart +** enough to know that calling the developer will not help get rid +** of the file. +*/ +#ifndef SQLITE_TEMP_FILE_PREFIX +# define SQLITE_TEMP_FILE_PREFIX "etilqs_" +#endif + +/* +** If using an alternative OS interface, then we must have an "os_other.h" +** header file available for that interface. Presumably the "os_other.h" +** header file contains #defines similar to those above. +*/ +#if OS_OTHER +# include "os_other.h" +#endif + + +/* +** The following values may be passed as the second argument to +** sqlite3OsLock(). The various locks exhibit the following semantics: +** +** SHARED: Any number of processes may hold a SHARED lock simultaneously. +** RESERVED: A single process may hold a RESERVED lock on a file at +** any time. 
Other processes may hold and obtain new SHARED locks. +** PENDING: A single process may hold a PENDING lock on a file at +** any one time. Existing SHARED locks may persist, but no new +** SHARED locks may be obtained by other processes. +** EXCLUSIVE: An EXCLUSIVE lock precludes all other locks. +** +** PENDING_LOCK may not be passed directly to sqlite3OsLock(). Instead, a +** process that requests an EXCLUSIVE lock may actually obtain a PENDING +** lock. This can be upgraded to an EXCLUSIVE lock by a subsequent call to +** sqlite3OsLock(). +*/ +#define NO_LOCK 0 +#define SHARED_LOCK 1 +#define RESERVED_LOCK 2 +#define PENDING_LOCK 3 +#define EXCLUSIVE_LOCK 4 + +/* +** File Locking Notes: (Mostly about windows but also some info for Unix) +** +** We cannot use LockFileEx() or UnlockFileEx() on Win95/98/ME because +** those functions are not available. So we use only LockFile() and +** UnlockFile(). +** +** LockFile() prevents not just writing but also reading by other processes. +** A SHARED_LOCK is obtained by locking a single randomly-chosen +** byte out of a specific range of bytes. The lock byte is obtained at +** random so two separate readers can probably access the file at the +** same time, unless they are unlucky and choose the same lock byte. +** An EXCLUSIVE_LOCK is obtained by locking all bytes in the range. +** There can only be one writer. A RESERVED_LOCK is obtained by locking +** a single byte of the file that is designated as the reserved lock byte. +** A PENDING_LOCK is obtained by locking a designated byte different from +** the RESERVED_LOCK byte. +** +** On WinNT/2K/XP systems, LockFileEx() and UnlockFileEx() are available, +** which means we can use reader/writer locks. When reader/writer locks +** are used, the lock is placed on the same range of bytes that is used +** for probabilistic locking in Win95/98/ME. Hence, the locking scheme +** will support two or more Win95 readers or two or more WinNT readers. +** But a single Win95 reader will lock out all WinNT readers and a single +** WinNT reader will lock out all other Win95 readers. +** +** The following #defines specify the range of bytes used for locking. +** SHARED_SIZE is the number of bytes available in the pool from which +** a random byte is selected for a shared lock. The pool of bytes for +** shared locks begins at SHARED_FIRST. +** +** These #defines are available in sqlite_aux.h so that adaptors for +** connecting SQLite to other operating systems can use the same byte +** ranges for locking. In particular, the same locking strategy and +** byte ranges are used for Unix. This leaves open the possiblity of having +** clients on win95, winNT, and unix all talking to the same shared file +** and all locking correctly. To do so would require that samba (or whatever +** tool is being used for file sharing) implements locks correctly between +** windows and unix. I'm guessing that isn't likely to happen, but by +** using the same locking range we are at least open to the possibility. +** +** Locking in windows is manditory. For this reason, we cannot store +** actual data in the bytes used for locking. The pager never allocates +** the pages involved in locking therefore. SHARED_SIZE is selected so +** that all locks will fit on a single page even at the minimum page size. +** PENDING_BYTE defines the beginning of the locks. By default PENDING_BYTE +** is set high so that we don't have to allocate an unused page except +** for very large databases. 
But one should test the page skipping logic +** by setting PENDING_BYTE low and running the entire regression suite. +** +** Changing the value of PENDING_BYTE results in a subtly incompatible +** file format. Depending on how it is changed, you might not notice +** the incompatibility right away, even running a full regression test. +** The default location of PENDING_BYTE is the first byte past the +** 1GB boundary. +** +*/ +#ifndef SQLITE_TEST +#define PENDING_BYTE 0x40000000 /* First byte past the 1GB boundary */ +#else +extern unsigned int sqlite3_pending_byte; +#define PENDING_BYTE sqlite3_pending_byte +#endif + +#define RESERVED_BYTE (PENDING_BYTE+1) +#define SHARED_FIRST (PENDING_BYTE+2) +#define SHARED_SIZE 510 + +/* +** Functions for accessing sqlite3_file methods +*/ +int sqlite3OsClose(sqlite3_file*); +int sqlite3OsRead(sqlite3_file*, void*, int amt, i64 offset); +int sqlite3OsWrite(sqlite3_file*, const void*, int amt, i64 offset); +int sqlite3OsTruncate(sqlite3_file*, i64 size); +int sqlite3OsSync(sqlite3_file*, int); +int sqlite3OsFileSize(sqlite3_file*, i64 *pSize); +int sqlite3OsLock(sqlite3_file*, int); +int sqlite3OsUnlock(sqlite3_file*, int); +int sqlite3OsCheckReservedLock(sqlite3_file *id); +int sqlite3OsFileControl(sqlite3_file*,int,void*); +int sqlite3OsSectorSize(sqlite3_file *id); +int sqlite3OsDeviceCharacteristics(sqlite3_file *id); + +/* +** Functions for accessing sqlite3_vfs methods +*/ +int sqlite3OsOpen(sqlite3_vfs *, const char *, sqlite3_file*, int, int *); +int sqlite3OsDelete(sqlite3_vfs *, const char *, int); +int sqlite3OsAccess(sqlite3_vfs *, const char *, int); +int sqlite3OsGetTempname(sqlite3_vfs *, int, char *); +int sqlite3OsFullPathname(sqlite3_vfs *, const char *, int, char *); +void *sqlite3OsDlOpen(sqlite3_vfs *, const char *); +void sqlite3OsDlError(sqlite3_vfs *, int, char *); +void *sqlite3OsDlSym(sqlite3_vfs *, void *, const char *); +void sqlite3OsDlClose(sqlite3_vfs *, void *); +int sqlite3OsRandomness(sqlite3_vfs *, int, char *); +int sqlite3OsSleep(sqlite3_vfs *, int); +int sqlite3OsCurrentTime(sqlite3_vfs *, double*); + +/* +** Convenience functions for opening and closing files using +** sqlite3_malloc() to obtain space for the file-handle structure. +*/ +int sqlite3OsOpenMalloc(sqlite3_vfs *, const char *, sqlite3_file **, int,int*); +int sqlite3OsCloseFree(sqlite3_file *); + +/* +** Each OS-specific backend defines an instance of the following +** structure for returning a pointer to its sqlite3_vfs. If OS_OTHER +** is defined (meaning that the application-defined OS interface layer +** is used) then there is no default VFS. The application must +** register one or more VFS structures using sqlite3_vfs_register() +** before attempting to use SQLite. +*/ +#if OS_UNIX || OS_WIN || OS_OS2 +sqlite3_vfs *sqlite3OsDefaultVfs(void); +#else +# define sqlite3OsDefaultVfs(X) 0 +#endif + +#endif /* _SQLITE_OS_H_ */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/os_common.h b/libraries/sqlite/unix/sqlite-3.5.1/src/os_common.h new file mode 100644 index 0000000000..8de4be9718 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/os_common.h @@ -0,0 +1,127 @@ +/* +** 2004 May 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. 
+** +****************************************************************************** +** +** This file contains macros and a little bit of code that is common to +** all of the platform-specific files (os_*.c) and is #included into those +** files. +** +** This file should be #included by the os_*.c files only. It is not a +** general purpose header file. +*/ + +/* +** At least two bugs have slipped in because we changed the MEMORY_DEBUG +** macro to SQLITE_DEBUG and some older makefiles have not yet made the +** switch. The following code should catch this problem at compile-time. +*/ +#ifdef MEMORY_DEBUG +# error "The MEMORY_DEBUG macro is obsolete. Use SQLITE_DEBUG instead." +#endif + + +/* + * When testing, this global variable stores the location of the + * pending-byte in the database file. + */ +#ifdef SQLITE_TEST +unsigned int sqlite3_pending_byte = 0x40000000; +#endif + +#ifdef SQLITE_DEBUG +int sqlite3_os_trace = 0; +#define OSTRACE1(X) if( sqlite3_os_trace ) sqlite3DebugPrintf(X) +#define OSTRACE2(X,Y) if( sqlite3_os_trace ) sqlite3DebugPrintf(X,Y) +#define OSTRACE3(X,Y,Z) if( sqlite3_os_trace ) sqlite3DebugPrintf(X,Y,Z) +#define OSTRACE4(X,Y,Z,A) if( sqlite3_os_trace ) sqlite3DebugPrintf(X,Y,Z,A) +#define OSTRACE5(X,Y,Z,A,B) if( sqlite3_os_trace ) sqlite3DebugPrintf(X,Y,Z,A,B) +#define OSTRACE6(X,Y,Z,A,B,C) \ + if(sqlite3_os_trace) sqlite3DebugPrintf(X,Y,Z,A,B,C) +#define OSTRACE7(X,Y,Z,A,B,C,D) \ + if(sqlite3_os_trace) sqlite3DebugPrintf(X,Y,Z,A,B,C,D) +#else +#define OSTRACE1(X) +#define OSTRACE2(X,Y) +#define OSTRACE3(X,Y,Z) +#define OSTRACE4(X,Y,Z,A) +#define OSTRACE5(X,Y,Z,A,B) +#define OSTRACE6(X,Y,Z,A,B,C) +#define OSTRACE7(X,Y,Z,A,B,C,D) +#endif + +/* +** Macros for performance tracing. Normally turned off. Only works +** on i486 hardware. +*/ +#ifdef SQLITE_PERFORMANCE_TRACE +__inline__ unsigned long long int hwtime(void){ + unsigned long long int x; + __asm__("rdtsc\n\t" + "mov %%edx, %%ecx\n\t" + :"=A" (x)); + return x; +} +static unsigned long long int g_start; +static unsigned int elapse; +#define TIMER_START g_start=hwtime() +#define TIMER_END elapse=hwtime()-g_start +#define TIMER_ELAPSED elapse +#else +#define TIMER_START +#define TIMER_END +#define TIMER_ELAPSED 0 +#endif + +/* +** If we compile with the SQLITE_TEST macro set, then the following block +** of code will give us the ability to simulate a disk I/O error. This +** is used for testing the I/O recovery logic. +*/ +#ifdef SQLITE_TEST +int sqlite3_io_error_hit = 0; +int sqlite3_io_error_pending = 0; +int sqlite3_io_error_persist = 0; +int sqlite3_diskfull_pending = 0; +int sqlite3_diskfull = 0; +#define SimulateIOError(CODE) \ + if( sqlite3_io_error_pending || sqlite3_io_error_hit ) \ + if( sqlite3_io_error_pending-- == 1 \ + || (sqlite3_io_error_persist && sqlite3_io_error_hit) ) \ + { local_ioerr(); CODE; } +static void local_ioerr(){ + IOTRACE(("IOERR\n")); + sqlite3_io_error_hit = 1; +} +#define SimulateDiskfullError(CODE) \ + if( sqlite3_diskfull_pending ){ \ + if( sqlite3_diskfull_pending == 1 ){ \ + local_ioerr(); \ + sqlite3_diskfull = 1; \ + sqlite3_io_error_hit = 1; \ + CODE; \ + }else{ \ + sqlite3_diskfull_pending--; \ + } \ + } +#else +#define SimulateIOError(A) +#define SimulateDiskfullError(A) +#endif + +/* +** When testing, keep a count of the number of open files. 
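+**
+** For example, the platform backends (such as os_os2.c below) are
+** expected to bracket their open and close paths with this macro:
+**
+**   OpenCounter(+1);     after a file handle is successfully opened
+**   OpenCounter(-1);     when the handle is closed again
+**
+** When SQLITE_TEST is not defined the macro compiles to nothing.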
+*/ +#ifdef SQLITE_TEST +int sqlite3_open_file_count = 0; +#define OpenCounter(X) sqlite3_open_file_count+=(X) +#else +#define OpenCounter(X) +#endif diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/os_os2.c b/libraries/sqlite/unix/sqlite-3.5.1/src/os_os2.c new file mode 100644 index 0000000000..c4fbe66d47 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/os_os2.c @@ -0,0 +1,1032 @@ +/* +** 2006 Feb 14 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains code that is specific to OS/2. +*/ + +#include "sqliteInt.h" + +#if OS_OS2 + +/* +** Macros used to determine whether or not to use threads. +*/ +#if defined(THREADSAFE) && THREADSAFE +# define SQLITE_OS2_THREADS 1 +#endif + +/* +** Include code that is common to all os_*.c files +*/ +#include "os_common.h" + +/* +** The os2File structure is subclass of OsFile specific for the OS/2 +** protability layer. +*/ +typedef struct os2File os2File; +struct os2File { + IoMethod const *pMethod; /* Always the first entry */ + HFILE h; /* Handle for accessing the file */ + int delOnClose; /* True if file is to be deleted on close */ + char* pathToDel; /* Name of file to delete on close */ + unsigned char locktype; /* Type of lock currently held on this file */ +}; + +/* +** Do not include any of the File I/O interface procedures if the +** SQLITE_OMIT_DISKIO macro is defined (indicating that there database +** will be in-memory only) +*/ +#ifndef SQLITE_OMIT_DISKIO + +/* +** Delete the named file +*/ +int sqlite3Os2Delete( const char *zFilename ){ + APIRET rc = NO_ERROR; + + rc = DosDelete( (PSZ)zFilename ); + OSTRACE2( "DELETE \"%s\"\n", zFilename ); + return rc == NO_ERROR ? SQLITE_OK : SQLITE_IOERR; +} + +/* +** Return TRUE if the named file exists. +*/ +int sqlite3Os2FileExists( const char *zFilename ){ + FILESTATUS3 fsts3ConfigInfo; + memset(&fsts3ConfigInfo, 0, sizeof(fsts3ConfigInfo)); + return DosQueryPathInfo( (PSZ)zFilename, FIL_STANDARD, + &fsts3ConfigInfo, sizeof(FILESTATUS3) ) == NO_ERROR; +} + +/* Forward declaration */ +int allocateOs2File( os2File *pInit, OsFile **pld ); + +/* +** Attempt to open a file for both reading and writing. If that +** fails, try opening it read-only. If the file does not exist, +** try to create it. +** +** On success, a handle for the open file is written to *id +** and *pReadonly is set to 0 if the file was opened for reading and +** writing or 1 if the file was opened read-only. The function returns +** SQLITE_OK. +** +** On failure, the function returns SQLITE_CANTOPEN and leaves +** *id and *pReadonly unchanged. 
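+**
+** A minimal sketch of a hypothetical caller (the name "test.db" and the
+** variables pFile and readOnly are illustrative only):
+**
+**   OsFile *pFile = 0;
+**   int readOnly;
+**   if( sqlite3Os2OpenReadWrite("test.db", &pFile, &readOnly)==SQLITE_OK ){
+**     ...read and write through pFile, then os2Close(&pFile)...
+**   }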
+*/ +int sqlite3Os2OpenReadWrite( + const char *zFilename, + OsFile **pld, + int *pReadonly +){ + os2File f; + HFILE hf; + ULONG ulAction; + APIRET rc = NO_ERROR; + + assert( *pld == 0 ); + rc = DosOpen( (PSZ)zFilename, &hf, &ulAction, 0L, + FILE_ARCHIVED | FILE_NORMAL, + OPEN_ACTION_CREATE_IF_NEW | OPEN_ACTION_OPEN_IF_EXISTS, + OPEN_FLAGS_FAIL_ON_ERROR | OPEN_FLAGS_RANDOM | + OPEN_SHARE_DENYNONE | OPEN_ACCESS_READWRITE, (PEAOP2)NULL ); + if( rc != NO_ERROR ){ + rc = DosOpen( (PSZ)zFilename, &hf, &ulAction, 0L, + FILE_ARCHIVED | FILE_NORMAL, + OPEN_ACTION_CREATE_IF_NEW | OPEN_ACTION_OPEN_IF_EXISTS, + OPEN_FLAGS_FAIL_ON_ERROR | OPEN_FLAGS_RANDOM | + OPEN_SHARE_DENYWRITE | OPEN_ACCESS_READONLY, (PEAOP2)NULL ); + if( rc != NO_ERROR ){ + return SQLITE_CANTOPEN; + } + *pReadonly = 1; + } + else{ + *pReadonly = 0; + } + f.h = hf; + f.locktype = NO_LOCK; + f.delOnClose = 0; + f.pathToDel = NULL; + OpenCounter(+1); + OSTRACE3( "OPEN R/W %d \"%s\"\n", hf, zFilename ); + return allocateOs2File( &f, pld ); +} + + +/* +** Attempt to open a new file for exclusive access by this process. +** The file will be opened for both reading and writing. To avoid +** a potential security problem, we do not allow the file to have +** previously existed. Nor do we allow the file to be a symbolic +** link. +** +** If delFlag is true, then make arrangements to automatically delete +** the file when it is closed. +** +** On success, write the file handle into *id and return SQLITE_OK. +** +** On failure, return SQLITE_CANTOPEN. +*/ +int sqlite3Os2OpenExclusive( const char *zFilename, OsFile **pld, int delFlag ){ + os2File f; + HFILE hf; + ULONG ulAction; + APIRET rc = NO_ERROR; + + assert( *pld == 0 ); + rc = DosOpen( (PSZ)zFilename, &hf, &ulAction, 0L, FILE_NORMAL, + OPEN_ACTION_CREATE_IF_NEW | OPEN_ACTION_REPLACE_IF_EXISTS, + OPEN_FLAGS_FAIL_ON_ERROR | OPEN_FLAGS_RANDOM | + OPEN_SHARE_DENYREADWRITE | OPEN_ACCESS_READWRITE, (PEAOP2)NULL ); + if( rc != NO_ERROR ){ + return SQLITE_CANTOPEN; + } + + f.h = hf; + f.locktype = NO_LOCK; + f.delOnClose = delFlag ? 1 : 0; + f.pathToDel = delFlag ? sqlite3OsFullPathname( zFilename ) : NULL; + OpenCounter( +1 ); + if( delFlag ) DosForceDelete( (PSZ)sqlite3OsFullPathname( zFilename ) ); + OSTRACE3( "OPEN EX %d \"%s\"\n", hf, sqlite3OsFullPathname ( zFilename ) ); + return allocateOs2File( &f, pld ); +} + +/* +** Attempt to open a new file for read-only access. +** +** On success, write the file handle into *id and return SQLITE_OK. +** +** On failure, return SQLITE_CANTOPEN. +*/ +int sqlite3Os2OpenReadOnly( const char *zFilename, OsFile **pld ){ + os2File f; + HFILE hf; + ULONG ulAction; + APIRET rc = NO_ERROR; + + assert( *pld == 0 ); + rc = DosOpen( (PSZ)zFilename, &hf, &ulAction, 0L, + FILE_NORMAL, OPEN_ACTION_OPEN_IF_EXISTS, + OPEN_FLAGS_FAIL_ON_ERROR | OPEN_FLAGS_RANDOM | + OPEN_SHARE_DENYWRITE | OPEN_ACCESS_READONLY, (PEAOP2)NULL ); + if( rc != NO_ERROR ){ + return SQLITE_CANTOPEN; + } + f.h = hf; + f.locktype = NO_LOCK; + f.delOnClose = 0; + f.pathToDel = NULL; + OpenCounter( +1 ); + OSTRACE3( "OPEN RO %d \"%s\"\n", hf, zFilename ); + return allocateOs2File( &f, pld ); +} + +/* +** Attempt to open a file descriptor for the directory that contains a +** file. This file descriptor can be used to fsync() the directory +** in order to make sure the creation of a new file is actually written +** to disk. +** +** This routine is only meaningful for Unix. It is a no-op under +** OS/2 since OS/2 does not support hard links. 
+** +** On success, a handle for a previously open file is at *id is +** updated with the new directory file descriptor and SQLITE_OK is +** returned. +** +** On failure, the function returns SQLITE_CANTOPEN and leaves +** *id unchanged. +*/ +int os2OpenDirectory( + OsFile *id, + const char *zDirname +){ + return SQLITE_OK; +} + +/* +** Create a temporary file name in zBuf. zBuf must be big enough to +** hold at least SQLITE_TEMPNAME_SIZE characters. +*/ +int sqlite3Os2TempFileName( char *zBuf ){ + static const unsigned char zChars[] = + "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "0123456789"; + int i, j; + PSZ zTempPath = 0; + if( DosScanEnv( (PSZ)"TEMP", &zTempPath ) ){ + if( DosScanEnv( (PSZ)"TMP", &zTempPath ) ){ + if( DosScanEnv( (PSZ)"TMPDIR", &zTempPath ) ){ + ULONG ulDriveNum = 0, ulDriveMap = 0; + DosQueryCurrentDisk( &ulDriveNum, &ulDriveMap ); + sprintf( (char*)zTempPath, "%c:", (char)( 'A' + ulDriveNum - 1 ) ); + } + } + } + /* strip off a trailing slashes or backslashes, otherwise we would get * + * multiple (back)slashes which causes DosOpen() to fail */ + j = strlen(zTempPath); + while( j > 0 && zTempPath[j-1] == '\\' || zTempPath[j-1] == '/' ){ + j--; + } + zTempPath[j] = '\0'; + for(;;){ + sprintf( zBuf, "%s\\"TEMP_FILE_PREFIX, zTempPath ); + j = strlen( zBuf ); + sqlite3Randomness( 15, &zBuf[j] ); + for( i = 0; i < 15; i++, j++ ){ + zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ]; + } + zBuf[j] = 0; + if( !sqlite3OsFileExists( zBuf ) ) break; + } + OSTRACE2( "TEMP FILENAME: %s\n", zBuf ); + return SQLITE_OK; +} + +/* +** Close a file. +*/ +int os2Close( OsFile **pld ){ + os2File *pFile; + APIRET rc = NO_ERROR; + if( pld && (pFile = (os2File*)*pld) != 0 ){ + OSTRACE2( "CLOSE %d\n", pFile->h ); + rc = DosClose( pFile->h ); + pFile->locktype = NO_LOCK; + if( pFile->delOnClose != 0 ){ + rc = DosForceDelete( (PSZ)pFile->pathToDel ); + } + *pld = 0; + OpenCounter( -1 ); + } + + return rc == NO_ERROR ? SQLITE_OK : SQLITE_IOERR; +} + +/* +** Read data from a file into a buffer. Return SQLITE_OK if all +** bytes were read successfully and SQLITE_IOERR if anything goes +** wrong. +*/ +int os2Read( OsFile *id, void *pBuf, int amt ){ + ULONG got; + assert( id!=0 ); + SimulateIOError( return SQLITE_IOERR ); + OSTRACE3( "READ %d lock=%d\n", ((os2File*)id)->h, ((os2File*)id)->locktype ); + DosRead( ((os2File*)id)->h, pBuf, amt, &got ); + if (got == (ULONG)amt) + return SQLITE_OK; + else if (got == 0) + return SQLITE_IOERR_READ; + else { + memset(&((char*)pBuf)[got], 0, amt-got); + return SQLITE_IOERR_SHORT_READ; + } +} + +/* +** Write data from a buffer into a file. Return SQLITE_OK on success +** or some other error code on failure. +*/ +int os2Write( OsFile *id, const void *pBuf, int amt ){ + APIRET rc = NO_ERROR; + ULONG wrote; + assert( id!=0 ); + SimulateIOError( return SQLITE_IOERR ); + SimulateDiskfullError( return SQLITE_FULL ); + OSTRACE3( "WRITE %d lock=%d\n", ((os2File*)id)->h, ((os2File*)id)->locktype ); + while( amt > 0 && + (rc = DosWrite( ((os2File*)id)->h, (PVOID)pBuf, amt, &wrote )) && wrote > 0 ){ + amt -= wrote; + pBuf = &((char*)pBuf)[wrote]; + } + + return ( rc != NO_ERROR || amt > (int)wrote ) ? SQLITE_FULL : SQLITE_OK; +} + +/* +** Move the read/write pointer in a file. 
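+**
+** Note that in this OsFile-based layer os2Read() and os2Write() operate
+** at the current file position (DosRead/DosWrite), so a caller positions
+** the handle first.  Illustrative sequence (iOffset, zBuf and nByte are
+** hypothetical names):
+**
+**   os2Seek(id, iOffset);
+**   os2Read(id, zBuf, nByte);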
+*/ +int os2Seek( OsFile *id, i64 offset ){ + APIRET rc = NO_ERROR; + ULONG filePointer = 0L; + assert( id!=0 ); + rc = DosSetFilePtr( ((os2File*)id)->h, offset, FILE_BEGIN, &filePointer ); + OSTRACE3( "SEEK %d %lld\n", ((os2File*)id)->h, offset ); + return rc == NO_ERROR ? SQLITE_OK : SQLITE_IOERR; +} + +/* +** Make sure all writes to a particular file are committed to disk. +*/ +int os2Sync( OsFile *id, int dataOnly ){ + assert( id!=0 ); + OSTRACE3( "SYNC %d lock=%d\n", ((os2File*)id)->h, ((os2File*)id)->locktype ); + return DosResetBuffer( ((os2File*)id)->h ) == NO_ERROR ? SQLITE_OK : SQLITE_IOERR; +} + +/* +** Sync the directory zDirname. This is a no-op on operating systems other +** than UNIX. +*/ +int sqlite3Os2SyncDirectory( const char *zDirname ){ + SimulateIOError( return SQLITE_IOERR ); + return SQLITE_OK; +} + +/* +** Truncate an open file to a specified size +*/ +int os2Truncate( OsFile *id, i64 nByte ){ + APIRET rc = NO_ERROR; + ULONG upperBits = nByte>>32; + assert( id!=0 ); + OSTRACE3( "TRUNCATE %d %lld\n", ((os2File*)id)->h, nByte ); + SimulateIOError( return SQLITE_IOERR ); + rc = DosSetFilePtr( ((os2File*)id)->h, nByte, FILE_BEGIN, &upperBits ); + if( rc != NO_ERROR ){ + return SQLITE_IOERR; + } + rc = DosSetFilePtr( ((os2File*)id)->h, 0L, FILE_END, &upperBits ); + return rc == NO_ERROR ? SQLITE_OK : SQLITE_IOERR; +} + +/* +** Determine the current size of a file in bytes +*/ +int os2FileSize( OsFile *id, i64 *pSize ){ + APIRET rc = NO_ERROR; + FILESTATUS3 fsts3FileInfo; + memset(&fsts3FileInfo, 0, sizeof(fsts3FileInfo)); + assert( id!=0 ); + SimulateIOError( return SQLITE_IOERR ); + rc = DosQueryFileInfo( ((os2File*)id)->h, FIL_STANDARD, &fsts3FileInfo, sizeof(FILESTATUS3) ); + if( rc == NO_ERROR ){ + *pSize = fsts3FileInfo.cbFile; + return SQLITE_OK; + } + else{ + return SQLITE_IOERR; + } +} + +/* +** Acquire a reader lock. +*/ +static int getReadLock( os2File *id ){ + FILELOCK LockArea, + UnlockArea; + memset(&LockArea, 0, sizeof(LockArea)); + memset(&UnlockArea, 0, sizeof(UnlockArea)); + LockArea.lOffset = SHARED_FIRST; + LockArea.lRange = SHARED_SIZE; + UnlockArea.lOffset = 0L; + UnlockArea.lRange = 0L; + return DosSetFileLocks( id->h, &UnlockArea, &LockArea, 2000L, 1L ); +} + +/* +** Undo a readlock +*/ +static int unlockReadLock( os2File *id ){ + FILELOCK LockArea, + UnlockArea; + memset(&LockArea, 0, sizeof(LockArea)); + memset(&UnlockArea, 0, sizeof(UnlockArea)); + LockArea.lOffset = 0L; + LockArea.lRange = 0L; + UnlockArea.lOffset = SHARED_FIRST; + UnlockArea.lRange = SHARED_SIZE; + return DosSetFileLocks( id->h, &UnlockArea, &LockArea, 2000L, 1L ); +} + +#ifndef SQLITE_OMIT_PAGER_PRAGMAS +/* +** Check that a given pathname is a directory and is writable +** +*/ +int sqlite3Os2IsDirWritable( char *zDirname ){ + FILESTATUS3 fsts3ConfigInfo; + APIRET rc = NO_ERROR; + memset(&fsts3ConfigInfo, 0, sizeof(fsts3ConfigInfo)); + if( zDirname==0 ) return 0; + if( strlen(zDirname)>CCHMAXPATH ) return 0; + rc = DosQueryPathInfo( (PSZ)zDirname, FIL_STANDARD, &fsts3ConfigInfo, sizeof(FILESTATUS3) ); + if( rc != NO_ERROR ) return 0; + if( (fsts3ConfigInfo.attrFile & FILE_DIRECTORY) != FILE_DIRECTORY ) return 0; + + return 1; +} +#endif /* SQLITE_OMIT_PAGER_PRAGMAS */ + +/* +** Lock the file with the lock specified by parameter locktype - one +** of the following: +** +** (1) SHARED_LOCK +** (2) RESERVED_LOCK +** (3) PENDING_LOCK +** (4) EXCLUSIVE_LOCK +** +** Sometimes when requesting one lock state, additional lock states +** are inserted in between. 
The locking might fail on one of the later +** transitions leaving the lock state different from what it started but +** still short of its goal. The following chart shows the allowed +** transitions and the inserted intermediate states: +** +** UNLOCKED -> SHARED +** SHARED -> RESERVED +** SHARED -> (PENDING) -> EXCLUSIVE +** RESERVED -> (PENDING) -> EXCLUSIVE +** PENDING -> EXCLUSIVE +** +** This routine will only increase a lock. The os2Unlock() routine +** erases all locks at once and returns us immediately to locking level 0. +** It is not possible to lower the locking level one step at a time. You +** must go straight to locking level 0. +*/ +int os2Lock( OsFile *id, int locktype ){ + APIRET rc = SQLITE_OK; /* Return code from subroutines */ + APIRET res = NO_ERROR; /* Result of an OS/2 lock call */ + int newLocktype; /* Set id->locktype to this value before exiting */ + int gotPendingLock = 0;/* True if we acquired a PENDING lock this time */ + FILELOCK LockArea, + UnlockArea; + os2File *pFile = (os2File*)id; + memset(&LockArea, 0, sizeof(LockArea)); + memset(&UnlockArea, 0, sizeof(UnlockArea)); + assert( pFile!=0 ); + OSTRACE4( "LOCK %d %d was %d\n", pFile->h, locktype, pFile->locktype ); + + /* If there is already a lock of this type or more restrictive on the + ** OsFile, do nothing. Don't use the end_lock: exit path, as + ** sqlite3OsEnterMutex() hasn't been called yet. + */ + if( pFile->locktype>=locktype ){ + return SQLITE_OK; + } + + /* Make sure the locking sequence is correct + */ + assert( pFile->locktype!=NO_LOCK || locktype==SHARED_LOCK ); + assert( locktype!=PENDING_LOCK ); + assert( locktype!=RESERVED_LOCK || pFile->locktype==SHARED_LOCK ); + + /* Lock the PENDING_LOCK byte if we need to acquire a PENDING lock or + ** a SHARED lock. If we are acquiring a SHARED lock, the acquisition of + ** the PENDING_LOCK byte is temporary. + */ + newLocktype = pFile->locktype; + if( pFile->locktype==NO_LOCK + || (locktype==EXCLUSIVE_LOCK && pFile->locktype==RESERVED_LOCK) + ){ + int cnt = 3; + + LockArea.lOffset = PENDING_BYTE; + LockArea.lRange = 1L; + UnlockArea.lOffset = 0L; + UnlockArea.lRange = 0L; + + while( cnt-->0 && (res = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, 2000L, 1L) )!=NO_ERROR ){ + /* Try 3 times to get the pending lock. The pending lock might be + ** held by another reader process who will release it momentarily. + */ + OSTRACE2( "could not get a PENDING lock. 
cnt=%d\n", cnt ); + DosSleep(1); + } + gotPendingLock = res; + } + + /* Acquire a shared lock + */ + if( locktype==SHARED_LOCK && res ){ + assert( pFile->locktype==NO_LOCK ); + res = getReadLock(pFile); + if( res == NO_ERROR ){ + newLocktype = SHARED_LOCK; + } + } + + /* Acquire a RESERVED lock + */ + if( locktype==RESERVED_LOCK && res ){ + assert( pFile->locktype==SHARED_LOCK ); + LockArea.lOffset = RESERVED_BYTE; + LockArea.lRange = 1L; + UnlockArea.lOffset = 0L; + UnlockArea.lRange = 0L; + res = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, 2000L, 1L ); + if( res == NO_ERROR ){ + newLocktype = RESERVED_LOCK; + } + } + + /* Acquire a PENDING lock + */ + if( locktype==EXCLUSIVE_LOCK && res ){ + newLocktype = PENDING_LOCK; + gotPendingLock = 0; + } + + /* Acquire an EXCLUSIVE lock + */ + if( locktype==EXCLUSIVE_LOCK && res ){ + assert( pFile->locktype>=SHARED_LOCK ); + res = unlockReadLock(pFile); + OSTRACE2( "unreadlock = %d\n", res ); + LockArea.lOffset = SHARED_FIRST; + LockArea.lRange = SHARED_SIZE; + UnlockArea.lOffset = 0L; + UnlockArea.lRange = 0L; + res = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, 2000L, 1L ); + if( res == NO_ERROR ){ + newLocktype = EXCLUSIVE_LOCK; + }else{ + OSTRACE2( "error-code = %d\n", res ); + } + } + + /* If we are holding a PENDING lock that ought to be released, then + ** release it now. + */ + if( gotPendingLock && locktype==SHARED_LOCK ){ + LockArea.lOffset = 0L; + LockArea.lRange = 0L; + UnlockArea.lOffset = PENDING_BYTE; + UnlockArea.lRange = 1L; + DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, 2000L, 1L ); + } + + /* Update the state of the lock has held in the file descriptor then + ** return the appropriate result code. + */ + if( res == NO_ERROR ){ + rc = SQLITE_OK; + }else{ + OSTRACE4( "LOCK FAILED %d trying for %d but got %d\n", pFile->h, + locktype, newLocktype ); + rc = SQLITE_BUSY; + } + pFile->locktype = newLocktype; + return rc; +} + +/* +** This routine checks if there is a RESERVED lock held on the specified +** file by this or any other process. If such a lock is held, return +** non-zero, otherwise zero. +*/ +int os2CheckReservedLock( OsFile *id ){ + APIRET rc = NO_ERROR; + os2File *pFile = (os2File*)id; + assert( pFile!=0 ); + if( pFile->locktype>=RESERVED_LOCK ){ + rc = 1; + OSTRACE3( "TEST WR-LOCK %d %d (local)\n", pFile->h, rc ); + }else{ + FILELOCK LockArea, + UnlockArea; + memset(&LockArea, 0, sizeof(LockArea)); + memset(&UnlockArea, 0, sizeof(UnlockArea)); + LockArea.lOffset = RESERVED_BYTE; + LockArea.lRange = 1L; + UnlockArea.lOffset = 0L; + UnlockArea.lRange = 0L; + rc = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, 2000L, 1L ); + if( rc == NO_ERROR ){ + LockArea.lOffset = 0L; + LockArea.lRange = 0L; + UnlockArea.lOffset = RESERVED_BYTE; + UnlockArea.lRange = 1L; + rc = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, 2000L, 1L ); + } + OSTRACE3( "TEST WR-LOCK %d %d (remote)\n", pFile->h, rc ); + } + return rc; +} + +/* +** Lower the locking level on file descriptor id to locktype. locktype +** must be either NO_LOCK or SHARED_LOCK. +** +** If the locking level of the file descriptor is already at or below +** the requested locking level, this routine is a no-op. +** +** It is not possible for this routine to fail if the second argument +** is NO_LOCK. 
If the second argument is SHARED_LOCK then this routine +** might return SQLITE_IOERR; +*/ +int os2Unlock( OsFile *id, int locktype ){ + int type; + APIRET rc = SQLITE_OK; + os2File *pFile = (os2File*)id; + FILELOCK LockArea, + UnlockArea; + memset(&LockArea, 0, sizeof(LockArea)); + memset(&UnlockArea, 0, sizeof(UnlockArea)); + assert( pFile!=0 ); + assert( locktype<=SHARED_LOCK ); + OSTRACE4( "UNLOCK %d to %d was %d\n", pFile->h, locktype, pFile->locktype ); + type = pFile->locktype; + if( type>=EXCLUSIVE_LOCK ){ + LockArea.lOffset = 0L; + LockArea.lRange = 0L; + UnlockArea.lOffset = SHARED_FIRST; + UnlockArea.lRange = SHARED_SIZE; + DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, 2000L, 1L ); + if( locktype==SHARED_LOCK && getReadLock(pFile) != NO_ERROR ){ + /* This should never happen. We should always be able to + ** reacquire the read lock */ + rc = SQLITE_IOERR; + } + } + if( type>=RESERVED_LOCK ){ + LockArea.lOffset = 0L; + LockArea.lRange = 0L; + UnlockArea.lOffset = RESERVED_BYTE; + UnlockArea.lRange = 1L; + DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, 2000L, 1L ); + } + if( locktype==NO_LOCK && type>=SHARED_LOCK ){ + unlockReadLock(pFile); + } + if( type>=PENDING_LOCK ){ + LockArea.lOffset = 0L; + LockArea.lRange = 0L; + UnlockArea.lOffset = PENDING_BYTE; + UnlockArea.lRange = 1L; + DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, 2000L, 1L ); + } + pFile->locktype = locktype; + return rc; +} + +/* +** Turn a relative pathname into a full pathname. Return a pointer +** to the full pathname stored in space obtained from sqliteMalloc(). +** The calling function is responsible for freeing this space once it +** is no longer needed. +*/ +char *sqlite3Os2FullPathname( const char *zRelative ){ + char *zFull = 0; + if( strchr(zRelative, ':') ){ + sqlite3SetString( &zFull, zRelative, (char*)0 ); + }else{ + ULONG ulDriveNum = 0; + ULONG ulDriveMap = 0; + ULONG cbzBufLen = SQLITE_TEMPNAME_SIZE; + char zDrive[2]; + char *zBuff; + + zBuff = sqliteMalloc( cbzBufLen ); + if( zBuff != 0 ){ + DosQueryCurrentDisk( &ulDriveNum, &ulDriveMap ); + if( DosQueryCurrentDir( ulDriveNum, (PBYTE)zBuff, &cbzBufLen ) == NO_ERROR ){ + sprintf( zDrive, "%c", (char)('A' + ulDriveNum - 1) ); + sqlite3SetString( &zFull, zDrive, ":\\", zBuff, + "\\", zRelative, (char*)0 ); + } + sqliteFree( zBuff ); + } + } + return zFull; +} + +/* +** The fullSync option is meaningless on os2, or correct me if I'm wrong. This is a no-op. +** From os_unix.c: Change the value of the fullsync flag in the given file descriptor. +** From os_unix.c: ((unixFile*)id)->fullSync = v; +*/ +static void os2SetFullSync( OsFile *id, int v ){ + return; +} + +/* +** Return the underlying file handle for an OsFile +*/ +static int os2FileHandle( OsFile *id ){ + return (int)((os2File*)id)->h; +} + +/* +** Return an integer that indices the type of lock currently held +** by this handle. (Used for testing and analysis only.) +*/ +static int os2LockState( OsFile *id ){ + return ((os2File*)id)->locktype; +} + +/* +** Return the sector size in bytes of the underlying block device for +** the specified file. This is almost always 512 bytes, but may be +** larger for some devices. +** +** SQLite code assumes this function cannot fail. It also assumes that +** if two files are created in the same file-system directory (i.e. +** a database and it's journal file) that the sector size will be the +** same for both. 
+*/ +static int os2SectorSize(OsFile *id){ + return SQLITE_DEFAULT_SECTOR_SIZE; +} + +/* +** This vector defines all the methods that can operate on an OsFile +** for os2. +*/ +static const IoMethod sqlite3Os2IoMethod = { + os2Close, + os2OpenDirectory, + os2Read, + os2Write, + os2Seek, + os2Truncate, + os2Sync, + os2SetFullSync, + os2FileHandle, + os2FileSize, + os2Lock, + os2Unlock, + os2LockState, + os2CheckReservedLock, + os2SectorSize, +}; + +/* +** Allocate memory for an OsFile. Initialize the new OsFile +** to the value given in pInit and return a pointer to the new +** OsFile. If we run out of memory, close the file and return NULL. +*/ +int allocateOs2File( os2File *pInit, OsFile **pld ){ + os2File *pNew; + pNew = sqliteMalloc( sizeof(*pNew) ); + if( pNew==0 ){ + DosClose( pInit->h ); + *pld = 0; + return SQLITE_NOMEM; + }else{ + *pNew = *pInit; + pNew->pMethod = &sqlite3Os2IoMethod; + pNew->locktype = NO_LOCK; + *pld = (OsFile*)pNew; + OpenCounter(+1); + return SQLITE_OK; + } +} + +#endif /* SQLITE_OMIT_DISKIO */ +/*************************************************************************** +** Everything above deals with file I/O. Everything that follows deals +** with other miscellanous aspects of the operating system interface +****************************************************************************/ + +#ifndef SQLITE_OMIT_LOAD_EXTENSION +/* +** Interfaces for opening a shared library, finding entry points +** within the shared library, and closing the shared library. +*/ +void *sqlite3Os2Dlopen(const char *zFilename){ + UCHAR loadErr[256]; + HMODULE hmod; + APIRET rc; + rc = DosLoadModule((PSZ)loadErr, sizeof(loadErr), zFilename, &hmod); + if (rc != NO_ERROR) return 0; + return (void*)hmod; +} +void *sqlite3Os2Dlsym(void *pHandle, const char *zSymbol){ + PFN pfn; + APIRET rc; + rc = DosQueryProcAddr((HMODULE)pHandle, 0L, zSymbol, &pfn); + if (rc != NO_ERROR) { + /* if the symbol itself was not found, search again for the same + * symbol with an extra underscore, that might be needed depending + * on the calling convention */ + char _zSymbol[256] = "_"; + strncat(_zSymbol, zSymbol, 255); + rc = DosQueryProcAddr((HMODULE)pHandle, 0L, _zSymbol, &pfn); + } + if (rc != NO_ERROR) return 0; + return (void *)pfn; +} +int sqlite3Os2Dlclose(void *pHandle){ + return DosFreeModule((HMODULE)pHandle); +} +#endif /* SQLITE_OMIT_LOAD_EXTENSION */ + + +/* +** Get information to seed the random number generator. The seed +** is written into the buffer zBuf[256]. The calling function must +** supply a sufficiently large buffer. +*/ +int sqlite3Os2RandomSeed( char *zBuf ){ + /* We have to initialize zBuf to prevent valgrind from reporting + ** errors. The reports issued by valgrind are incorrect - we would + ** prefer that the randomness be increased by making use of the + ** uninitialized space in zBuf - but valgrind errors tend to worry + ** some users. Rather than argue, it seems easier just to initialize + ** the whole array and silence valgrind, even if that means less randomness + ** in the random seed. + ** + ** When testing, initializing zBuf[] to zero is all we do. That means + ** that we always use the same random number sequence. This makes the + ** tests repeatable. + */ + memset( zBuf, 0, 256 ); + DosGetDateTime( (PDATETIME)zBuf ); + return SQLITE_OK; +} + +/* +** Sleep for a little while. Return the amount of time slept. 
+*/ +int sqlite3Os2Sleep( int ms ){ + DosSleep( ms ); + return ms; +} + +/* +** Static variables used for thread synchronization +*/ +static int inMutex = 0; +#ifdef SQLITE_OS2_THREADS +static ULONG mutexOwner; +#endif + +/* +** The following pair of routines implement mutual exclusion for +** multi-threaded processes. Only a single thread is allowed to +** executed code that is surrounded by EnterMutex() and LeaveMutex(). +** +** SQLite uses only a single Mutex. There is not much critical +** code and what little there is executes quickly and without blocking. +*/ +void sqlite3Os2EnterMutex(){ +#ifdef SQLITE_OS2_THREADS + PTIB ptib; + DosEnterCritSec(); + DosGetInfoBlocks( &ptib, NULL ); + mutexOwner = ptib->tib_ptib2->tib2_ultid; +#endif + assert( !inMutex ); + inMutex = 1; +} +void sqlite3Os2LeaveMutex(){ +#ifdef SQLITE_OS2_THREADS + PTIB ptib; +#endif + assert( inMutex ); + inMutex = 0; +#ifdef SQLITE_OS2_THREADS + DosGetInfoBlocks( &ptib, NULL ); + assert( mutexOwner == ptib->tib_ptib2->tib2_ultid ); + DosExitCritSec(); +#endif +} + +/* +** Return TRUE if the mutex is currently held. +** +** If the thisThreadOnly parameter is true, return true if and only if the +** calling thread holds the mutex. If the parameter is false, return +** true if any thread holds the mutex. +*/ +int sqlite3Os2InMutex( int thisThreadOnly ){ +#ifdef SQLITE_OS2_THREADS + PTIB ptib; + DosGetInfoBlocks( &ptib, NULL ); + return inMutex>0 && (thisThreadOnly==0 || mutexOwner==ptib->tib_ptib2->tib2_ultid); +#else + return inMutex>0; +#endif +} + +/* +** The following variable, if set to a non-zero value, becomes the result +** returned from sqlite3OsCurrentTime(). This is used for testing. +*/ +#ifdef SQLITE_TEST +int sqlite3_current_time = 0; +#endif + +/* +** Find the current time (in Universal Coordinated Time). Write the +** current time and date as a Julian Day number into *prNow and +** return 0. Return 1 if the time and date cannot be found. +*/ +int sqlite3Os2CurrentTime( double *prNow ){ + double now; + USHORT second, minute, hour, + day, month, year; + DATETIME dt; + DosGetDateTime( &dt ); + second = (USHORT)dt.seconds; + minute = (USHORT)dt.minutes + dt.timezone; + hour = (USHORT)dt.hours; + day = (USHORT)dt.day; + month = (USHORT)dt.month; + year = (USHORT)dt.year; + + /* Calculations from http://www.astro.keele.ac.uk/~rno/Astronomy/hjd.html + http://www.astro.keele.ac.uk/~rno/Astronomy/hjd-0.1.c */ + /* Calculate the Julian days */ + now = day - 32076 + + 1461*(year + 4800 + (month - 14)/12)/4 + + 367*(month - 2 - (month - 14)/12*12)/12 - + 3*((year + 4900 + (month - 14)/12)/100)/4; + + /* Add the fractional hours, mins and seconds */ + now += (hour + 12.0)/24.0; + now += minute/1440.0; + now += second/86400.0; + *prNow = now; +#ifdef SQLITE_TEST + if( sqlite3_current_time ){ + *prNow = sqlite3_current_time/86400.0 + 2440587.5; + } +#endif + return 0; +} + +/* +** Remember the number of thread-specific-data blocks allocated. +** Use this to verify that we are not leaking thread-specific-data. +** Ticket #1601 +*/ +#ifdef SQLITE_TEST +int sqlite3_tsd_count = 0; +# define TSD_COUNTER_INCR InterlockedIncrement( &sqlite3_tsd_count ) +# define TSD_COUNTER_DECR InterlockedDecrement( &sqlite3_tsd_count ) +#else +# define TSD_COUNTER_INCR /* no-op */ +# define TSD_COUNTER_DECR /* no-op */ +#endif + +/* +** If called with allocateFlag>1, then return a pointer to thread +** specific data for the current thread. Allocate and zero the +** thread-specific data if it does not already exist necessary. 
+** +** If called with allocateFlag==0, then check the current thread +** specific data. Return it if it exists. If it does not exist, +** then return NULL. +** +** If called with allocateFlag<0, check to see if the thread specific +** data is allocated and is all zero. If it is then deallocate it. +** Return a pointer to the thread specific data or NULL if it is +** unallocated or gets deallocated. +*/ +ThreadData *sqlite3Os2ThreadSpecificData( int allocateFlag ){ + static ThreadData **s_ppTsd = NULL; + static const ThreadData zeroData = {0, 0, 0}; + ThreadData *pTsd; + + if( !s_ppTsd ){ + sqlite3OsEnterMutex(); + if( !s_ppTsd ){ + PULONG pul; + APIRET rc = DosAllocThreadLocalMemory(1, &pul); + if( rc != NO_ERROR ){ + sqlite3OsLeaveMutex(); + return 0; + } + s_ppTsd = (ThreadData **)pul; + } + sqlite3OsLeaveMutex(); + } + pTsd = *s_ppTsd; + if( allocateFlag>0 ){ + if( !pTsd ){ + pTsd = sqlite3OsMalloc( sizeof(zeroData) ); + if( pTsd ){ + *pTsd = zeroData; + *s_ppTsd = pTsd; + TSD_COUNTER_INCR; + } + } + }else if( pTsd!=0 && allocateFlag<0 + && memcmp( pTsd, &zeroData, sizeof(ThreadData) )==0 ){ + sqlite3OsFree(pTsd); + *s_ppTsd = NULL; + TSD_COUNTER_DECR; + pTsd = 0; + } + return pTsd; +} +#endif /* OS_OS2 */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/os_unix.c b/libraries/sqlite/unix/sqlite-3.5.1/src/os_unix.c new file mode 100644 index 0000000000..e95435e74b --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/os_unix.c @@ -0,0 +1,2749 @@ +/* +** 2004 May 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains code that is specific to Unix systems. +*/ +#include "sqliteInt.h" +#if OS_UNIX /* This file is used on unix only */ + +/* #define SQLITE_ENABLE_LOCKING_STYLE 0 */ + +/* +** These #defines should enable >2GB file support on Posix if the +** underlying operating system supports it. If the OS lacks +** large file support, these should be no-ops. +** +** Large file support can be disabled using the -DSQLITE_DISABLE_LFS switch +** on the compiler command line. This is necessary if you are compiling +** on a recent machine (ex: RedHat 7.2) but you want your code to work +** on an older machine (ex: RedHat 6.0). If you compile on RedHat 7.2 +** without this option, LFS is enable. But LFS does not exist in the kernel +** in RedHat 6.0, so the code won't work. Hence, for maximum binary +** portability you should omit LFS. +*/ +#ifndef SQLITE_DISABLE_LFS +# define _LARGE_FILE 1 +# ifndef _FILE_OFFSET_BITS +# define _FILE_OFFSET_BITS 64 +# endif +# define _LARGEFILE_SOURCE 1 +#endif + +/* +** standard include files. +*/ +#include +#include +#include +#include +#include +#include +#include +#ifdef SQLITE_ENABLE_LOCKING_STYLE +#include +#include +#include +#endif /* SQLITE_ENABLE_LOCKING_STYLE */ + +/* +** If we are to be thread-safe, include the pthreads header and define +** the SQLITE_UNIX_THREADS macro. +*/ +#if SQLITE_THREADSAFE +# include +# define SQLITE_UNIX_THREADS 1 +#endif + +/* +** Default permissions when creating a new file +*/ +#ifndef SQLITE_DEFAULT_FILE_PERMISSIONS +# define SQLITE_DEFAULT_FILE_PERMISSIONS 0644 +#endif + +/* +** Maximum supported path-length. 
+*/ +#define MAX_PATHNAME 512 + + +/* +** The unixFile structure is subclass of sqlite3_file specific for the unix +** protability layer. +*/ +typedef struct unixFile unixFile; +struct unixFile { + sqlite3_io_methods const *pMethod; /* Always the first entry */ +#ifdef SQLITE_TEST + /* In test mode, increase the size of this structure a bit so that + ** it is larger than the struct CrashFile defined in test6.c. + */ + char aPadding[32]; +#endif + struct openCnt *pOpen; /* Info about all open fd's on this inode */ + struct lockInfo *pLock; /* Info about locks on this inode */ +#ifdef SQLITE_ENABLE_LOCKING_STYLE + void *lockingContext; /* Locking style specific state */ +#endif /* SQLITE_ENABLE_LOCKING_STYLE */ + int h; /* The file descriptor */ + unsigned char locktype; /* The type of lock held on this fd */ + int dirfd; /* File descriptor for the directory */ +#if SQLITE_THREADSAFE + pthread_t tid; /* The thread that "owns" this unixFile */ +#endif +}; + +/* +** Include code that is common to all os_*.c files +*/ +#include "os_common.h" + +/* +** Define various macros that are missing from some systems. +*/ +#ifndef O_LARGEFILE +# define O_LARGEFILE 0 +#endif +#ifdef SQLITE_DISABLE_LFS +# undef O_LARGEFILE +# define O_LARGEFILE 0 +#endif +#ifndef O_NOFOLLOW +# define O_NOFOLLOW 0 +#endif +#ifndef O_BINARY +# define O_BINARY 0 +#endif + +/* +** The DJGPP compiler environment looks mostly like Unix, but it +** lacks the fcntl() system call. So redefine fcntl() to be something +** that always succeeds. This means that locking does not occur under +** DJGPP. But it's DOS - what did you expect? +*/ +#ifdef __DJGPP__ +# define fcntl(A,B,C) 0 +#endif + +/* +** The threadid macro resolves to the thread-id or to 0. Used for +** testing and debugging only. +*/ +#if SQLITE_THREADSAFE +#define threadid pthread_self() +#else +#define threadid 0 +#endif + +/* +** Set or check the unixFile.tid field. This field is set when an unixFile +** is first opened. All subsequent uses of the unixFile verify that the +** same thread is operating on the unixFile. Some operating systems do +** not allow locks to be overridden by other threads and that restriction +** means that sqlite3* database handles cannot be moved from one thread +** to another. This logic makes sure a user does not try to do that +** by mistake. +** +** Version 3.3.1 (2006-01-15): unixFile can be moved from one thread to +** another as long as we are running on a system that supports threads +** overriding each others locks (which now the most common behavior) +** or if no locks are held. But the unixFile.pLock field needs to be +** recomputed because its key includes the thread-id. See the +** transferOwnership() function below for additional information +*/ +#if SQLITE_THREADSAFE +# define SET_THREADID(X) (X)->tid = pthread_self() +# define CHECK_THREADID(X) (threadsOverrideEachOthersLocks==0 && \ + !pthread_equal((X)->tid, pthread_self())) +#else +# define SET_THREADID(X) +# define CHECK_THREADID(X) 0 +#endif + +/* +** Here is the dirt on POSIX advisory locks: ANSI STD 1003.1 (1996) +** section 6.5.2.2 lines 483 through 490 specify that when a process +** sets or clears a lock, that operation overrides any prior locks set +** by the same process. It does not explicitly say so, but this implies +** that it overrides locks set by the same process using a different +** file descriptor. 
Consider this test case: +** +** int fd1 = open("./file1", O_RDWR|O_CREAT, 0644); +** int fd2 = open("./file2", O_RDWR|O_CREAT, 0644); +** +** Suppose ./file1 and ./file2 are really the same file (because +** one is a hard or symbolic link to the other) then if you set +** an exclusive lock on fd1, then try to get an exclusive lock +** on fd2, it works. I would have expected the second lock to +** fail since there was already a lock on the file due to fd1. +** But not so. Since both locks came from the same process, the +** second overrides the first, even though they were on different +** file descriptors opened on different file names. +** +** Bummer. If you ask me, this is broken. Badly broken. It means +** that we cannot use POSIX locks to synchronize file access among +** competing threads of the same process. POSIX locks will work fine +** to synchronize access for threads in separate processes, but not +** threads within the same process. +** +** To work around the problem, SQLite has to manage file locks internally +** on its own. Whenever a new database is opened, we have to find the +** specific inode of the database file (the inode is determined by the +** st_dev and st_ino fields of the stat structure that fstat() fills in) +** and check for locks already existing on that inode. When locks are +** created or removed, we have to look at our own internal record of the +** locks to see if another thread has previously set a lock on that same +** inode. +** +** The sqlite3_file structure for POSIX is no longer just an integer file +** descriptor. It is now a structure that holds the integer file +** descriptor and a pointer to a structure that describes the internal +** locks on the corresponding inode. There is one locking structure +** per inode, so if the same inode is opened twice, both unixFile structures +** point to the same locking structure. The locking structure keeps +** a reference count (so we will know when to delete it) and a "cnt" +** field that tells us its internal lock status. cnt==0 means the +** file is unlocked. cnt==-1 means the file has an exclusive lock. +** cnt>0 means there are cnt shared locks on the file. +** +** Any attempt to lock or unlock a file first checks the locking +** structure. The fcntl() system call is only invoked to set a +** POSIX lock if the internal lock structure transitions between +** a locked and an unlocked state. +** +** 2004-Jan-11: +** More recent discoveries about POSIX advisory locks. (The more +** I discover, the more I realize the a POSIX advisory locks are +** an abomination.) +** +** If you close a file descriptor that points to a file that has locks, +** all locks on that file that are owned by the current process are +** released. To work around this problem, each unixFile structure contains +** a pointer to an openCnt structure. There is one openCnt structure +** per open inode, which means that multiple unixFile can point to a single +** openCnt. When an attempt is made to close an unixFile, if there are +** other unixFile open on the same inode that are holding locks, the call +** to close() the file descriptor is deferred until all of the locks clear. +** The openCnt structure keeps a list of file descriptors that need to +** be closed and that list is walked (and cleared) when the last lock +** clears. +** +** First, under Linux threads, because each thread has a separate +** process ID, lock operations in one thread do not override locks +** to the same file in other threads. 
Linux threads behave like +** separate processes in this respect. But, if you close a file +** descriptor in linux threads, all locks are cleared, even locks +** on other threads and even though the other threads have different +** process IDs. Linux threads is inconsistent in this respect. +** (I'm beginning to think that linux threads is an abomination too.) +** The consequence of this all is that the hash table for the lockInfo +** structure has to include the process id as part of its key because +** locks in different threads are treated as distinct. But the +** openCnt structure should not include the process id in its +** key because close() clears lock on all threads, not just the current +** thread. Were it not for this goofiness in linux threads, we could +** combine the lockInfo and openCnt structures into a single structure. +** +** 2004-Jun-28: +** On some versions of linux, threads can override each others locks. +** On others not. Sometimes you can change the behavior on the same +** system by setting the LD_ASSUME_KERNEL environment variable. The +** POSIX standard is silent as to which behavior is correct, as far +** as I can tell, so other versions of unix might show the same +** inconsistency. There is no little doubt in my mind that posix +** advisory locks and linux threads are profoundly broken. +** +** To work around the inconsistencies, we have to test at runtime +** whether or not threads can override each others locks. This test +** is run once, the first time any lock is attempted. A static +** variable is set to record the results of this test for future +** use. +*/ + +/* +** An instance of the following structure serves as the key used +** to locate a particular lockInfo structure given its inode. +** +** If threads cannot override each others locks, then we set the +** lockKey.tid field to the thread ID. If threads can override +** each others locks then tid is always set to zero. tid is omitted +** if we compile without threading support. +*/ +struct lockKey { + dev_t dev; /* Device number */ + ino_t ino; /* Inode number */ +#if SQLITE_THREADSAFE + pthread_t tid; /* Thread ID or zero if threads can override each other */ +#endif +}; + +/* +** An instance of the following structure is allocated for each open +** inode on each thread with a different process ID. (Threads have +** different process IDs on linux, but not on most other unixes.) +** +** A single inode can have multiple file descriptors, so each unixFile +** structure contains a pointer to an instance of this object and this +** object keeps a count of the number of unixFile pointing to it. +*/ +struct lockInfo { + struct lockKey key; /* The lookup key */ + int cnt; /* Number of SHARED locks held */ + int locktype; /* One of SHARED_LOCK, RESERVED_LOCK etc. */ + int nRef; /* Number of pointers to this structure */ +}; + +/* +** An instance of the following structure serves as the key used +** to locate a particular openCnt structure given its inode. This +** is the same as the lockKey except that the thread ID is omitted. +*/ +struct openKey { + dev_t dev; /* Device number */ + ino_t ino; /* Inode number */ +}; + +/* +** An instance of the following structure is allocated for each open +** inode. This structure keeps track of the number of locks on that +** inode. If a close is attempted against an inode that is holding +** locks, the close is deferred until all locks clear by adding the +** file descriptor to be closed to the pending list. 
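+**
+** As a concrete illustration of the bookkeeping described above (the
+** file name and variable names are hypothetical):
+**
+**   int fd1 = open("test.db", O_RDWR|O_CREAT, 0644);
+**   int fd2 = open("test.db", O_RDWR|O_CREAT, 0644);
+**
+** Both descriptors resolve (via fstat()) to the same st_dev/st_ino pair,
+** so the two unixFile structures end up sharing one lockInfo and one
+** openCnt.  If fd1 is close()d while fd2 still holds locks on the inode,
+** fd1 is simply appended to openCnt.aPending and the real close() is
+** deferred until the last lock clears.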
+*/ +struct openCnt { + struct openKey key; /* The lookup key */ + int nRef; /* Number of pointers to this structure */ + int nLock; /* Number of outstanding locks */ + int nPending; /* Number of pending close() operations */ + int *aPending; /* Malloced space holding fd's awaiting a close() */ +}; + +/* +** These hash tables map inodes and file descriptors (really, lockKey and +** openKey structures) into lockInfo and openCnt structures. Access to +** these hash tables must be protected by a mutex. +*/ +static Hash lockHash = {SQLITE_HASH_BINARY, 0, 0, 0, 0, 0}; +static Hash openHash = {SQLITE_HASH_BINARY, 0, 0, 0, 0, 0}; + +#ifdef SQLITE_ENABLE_LOCKING_STYLE +/* +** The locking styles are associated with the different file locking +** capabilities supported by different file systems. +** +** POSIX locking style fully supports shared and exclusive byte-range locks +** ADP locking only supports exclusive byte-range locks +** FLOCK only supports a single file-global exclusive lock +** DOTLOCK isn't a true locking style, it refers to the use of a special +** file named the same as the database file with a '.lock' extension, this +** can be used on file systems that do not offer any reliable file locking +** NO locking means that no locking will be attempted, this is only used for +** read-only file systems currently +** UNSUPPORTED means that no locking will be attempted, this is only used for +** file systems that are known to be unsupported +*/ +typedef enum { + posixLockingStyle = 0, /* standard posix-advisory locks */ + afpLockingStyle, /* use afp locks */ + flockLockingStyle, /* use flock() */ + dotlockLockingStyle, /* use .lock files */ + noLockingStyle, /* useful for read-only file system */ + unsupportedLockingStyle /* indicates unsupported file system */ +} sqlite3LockingStyle; +#endif /* SQLITE_ENABLE_LOCKING_STYLE */ + +/* +** Helper functions to obtain and relinquish the global mutex. +*/ +static void enterMutex(){ + sqlite3_mutex_enter(sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER)); +} +static void leaveMutex(){ + sqlite3_mutex_leave(sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER)); +} + +#if SQLITE_THREADSAFE +/* +** This variable records whether or not threads can override each others +** locks. +** +** 0: No. Threads cannot override each others locks. +** 1: Yes. Threads can override each others locks. +** -1: We don't know yet. +** +** On some systems, we know at compile-time if threads can override each +** others locks. On those systems, the SQLITE_THREAD_OVERRIDE_LOCK macro +** will be set appropriately. On other systems, we have to check at +** runtime. On these latter systems, SQLTIE_THREAD_OVERRIDE_LOCK is +** undefined. +** +** This variable normally has file scope only. But during testing, we make +** it a global so that the test code can change its value in order to verify +** that the right stuff happens in either case. +*/ +#ifndef SQLITE_THREAD_OVERRIDE_LOCK +# define SQLITE_THREAD_OVERRIDE_LOCK -1 +#endif +#ifdef SQLITE_TEST +int threadsOverrideEachOthersLocks = SQLITE_THREAD_OVERRIDE_LOCK; +#else +static int threadsOverrideEachOthersLocks = SQLITE_THREAD_OVERRIDE_LOCK; +#endif + +/* +** This structure holds information passed into individual test +** threads by the testThreadLockingBehavior() routine. +*/ +struct threadTestData { + int fd; /* File to be locked */ + struct flock lock; /* The locking operation */ + int result; /* Result of the locking operation */ +}; + +#ifdef SQLITE_LOCK_TRACE +/* +** Print out information about all locking operations. 
+** +** This routine is used for troubleshooting locks on multithreaded +** platforms. Enable by compiling with the -DSQLITE_LOCK_TRACE +** command-line option on the compiler. This code is normally +** turned off. +*/ +static int lockTrace(int fd, int op, struct flock *p){ + char *zOpName, *zType; + int s; + int savedErrno; + if( op==F_GETLK ){ + zOpName = "GETLK"; + }else if( op==F_SETLK ){ + zOpName = "SETLK"; + }else{ + s = fcntl(fd, op, p); + sqlite3DebugPrintf("fcntl unknown %d %d %d\n", fd, op, s); + return s; + } + if( p->l_type==F_RDLCK ){ + zType = "RDLCK"; + }else if( p->l_type==F_WRLCK ){ + zType = "WRLCK"; + }else if( p->l_type==F_UNLCK ){ + zType = "UNLCK"; + }else{ + assert( 0 ); + } + assert( p->l_whence==SEEK_SET ); + s = fcntl(fd, op, p); + savedErrno = errno; + sqlite3DebugPrintf("fcntl %d %d %s %s %d %d %d %d\n", + threadid, fd, zOpName, zType, (int)p->l_start, (int)p->l_len, + (int)p->l_pid, s); + if( s==(-1) && op==F_SETLK && (p->l_type==F_RDLCK || p->l_type==F_WRLCK) ){ + struct flock l2; + l2 = *p; + fcntl(fd, F_GETLK, &l2); + if( l2.l_type==F_RDLCK ){ + zType = "RDLCK"; + }else if( l2.l_type==F_WRLCK ){ + zType = "WRLCK"; + }else if( l2.l_type==F_UNLCK ){ + zType = "UNLCK"; + }else{ + assert( 0 ); + } + sqlite3DebugPrintf("fcntl-failure-reason: %s %d %d %d\n", + zType, (int)l2.l_start, (int)l2.l_len, (int)l2.l_pid); + } + errno = savedErrno; + return s; +} +#define fcntl lockTrace +#endif /* SQLITE_LOCK_TRACE */ + +/* +** The testThreadLockingBehavior() routine launches two separate +** threads on this routine. This routine attempts to lock a file +** descriptor then returns. The success or failure of that attempt +** allows the testThreadLockingBehavior() procedure to determine +** whether or not threads can override each others locks. +*/ +static void *threadLockingTest(void *pArg){ + struct threadTestData *pData = (struct threadTestData*)pArg; + pData->result = fcntl(pData->fd, F_SETLK, &pData->lock); + return pArg; +} + +/* +** This procedure attempts to determine whether or not threads +** can override each others locks then sets the +** threadsOverrideEachOthersLocks variable appropriately. +*/ +static void testThreadLockingBehavior(int fd_orig){ + int fd; + struct threadTestData d[2]; + pthread_t t[2]; + + fd = dup(fd_orig); + if( fd<0 ) return; + memset(d, 0, sizeof(d)); + d[0].fd = fd; + d[0].lock.l_type = F_RDLCK; + d[0].lock.l_len = 1; + d[0].lock.l_start = 0; + d[0].lock.l_whence = SEEK_SET; + d[1] = d[0]; + d[1].lock.l_type = F_WRLCK; + pthread_create(&t[0], 0, threadLockingTest, &d[0]); + pthread_create(&t[1], 0, threadLockingTest, &d[1]); + pthread_join(t[0], 0); + pthread_join(t[1], 0); + close(fd); + threadsOverrideEachOthersLocks = d[0].result==0 && d[1].result==0; +} +#endif /* SQLITE_THREADSAFE */ + +/* +** Release a lockInfo structure previously allocated by findLockInfo(). +*/ +static void releaseLockInfo(struct lockInfo *pLock){ + if (pLock == NULL) + return; + pLock->nRef--; + if( pLock->nRef==0 ){ + sqlite3HashInsert(&lockHash, &pLock->key, sizeof(pLock->key), 0); + sqlite3_free(pLock); + } +} + +/* +** Release a openCnt structure previously allocated by findLockInfo(). 
+*/ +static void releaseOpenCnt(struct openCnt *pOpen){ + if (pOpen == NULL) + return; + pOpen->nRef--; + if( pOpen->nRef==0 ){ + sqlite3HashInsert(&openHash, &pOpen->key, sizeof(pOpen->key), 0); + free(pOpen->aPending); + sqlite3_free(pOpen); + } +} + +#ifdef SQLITE_ENABLE_LOCKING_STYLE +/* +** Tests a byte-range locking query to see if byte range locks are +** supported, if not we fall back to dotlockLockingStyle. +*/ +static sqlite3LockingStyle sqlite3TestLockingStyle( + const char *filePath, + int fd +){ + /* test byte-range lock using fcntl */ + struct flock lockInfo; + + lockInfo.l_len = 1; + lockInfo.l_start = 0; + lockInfo.l_whence = SEEK_SET; + lockInfo.l_type = F_RDLCK; + + if( fcntl(fd, F_GETLK, &lockInfo)!=-1 ) { + return posixLockingStyle; + } + + /* testing for flock can give false positives. So if if the above test + ** fails, then we fall back to using dot-lock style locking. + */ + return dotlockLockingStyle; +} + +/* +** Examines the f_fstypename entry in the statfs structure as returned by +** stat() for the file system hosting the database file, assigns the +** appropriate locking style based on it's value. These values and +** assignments are based on Darwin/OSX behavior and have not been tested on +** other systems. +*/ +static sqlite3LockingStyle sqlite3DetectLockingStyle( + const char *filePath, + int fd +){ + +#ifdef SQLITE_FIXED_LOCKING_STYLE + return (sqlite3LockingStyle)SQLITE_FIXED_LOCKING_STYLE; +#else + struct statfs fsInfo; + + if (statfs(filePath, &fsInfo) == -1) + return sqlite3TestLockingStyle(filePath, fd); + + if (fsInfo.f_flags & MNT_RDONLY) + return noLockingStyle; + + if( (!strcmp(fsInfo.f_fstypename, "hfs")) || + (!strcmp(fsInfo.f_fstypename, "ufs")) ) + return posixLockingStyle; + + if(!strcmp(fsInfo.f_fstypename, "afpfs")) + return afpLockingStyle; + + if(!strcmp(fsInfo.f_fstypename, "nfs")) + return sqlite3TestLockingStyle(filePath, fd); + + if(!strcmp(fsInfo.f_fstypename, "smbfs")) + return flockLockingStyle; + + if(!strcmp(fsInfo.f_fstypename, "msdos")) + return dotlockLockingStyle; + + if(!strcmp(fsInfo.f_fstypename, "webdav")) + return unsupportedLockingStyle; + + return sqlite3TestLockingStyle(filePath, fd); +#endif /* SQLITE_FIXED_LOCKING_STYLE */ +} + +#endif /* SQLITE_ENABLE_LOCKING_STYLE */ + +/* +** Given a file descriptor, locate lockInfo and openCnt structures that +** describes that file descriptor. Create new ones if necessary. The +** return values might be uninitialized if an error occurs. +** +** Return the number of errors. +*/ +static int findLockInfo( + int fd, /* The file descriptor used in the key */ + struct lockInfo **ppLock, /* Return the lockInfo structure here */ + struct openCnt **ppOpen /* Return the openCnt structure here */ +){ + int rc; + struct lockKey key1; + struct openKey key2; + struct stat statbuf; + struct lockInfo *pLock; + struct openCnt *pOpen; + rc = fstat(fd, &statbuf); + if( rc!=0 ) return 1; + + memset(&key1, 0, sizeof(key1)); + key1.dev = statbuf.st_dev; + key1.ino = statbuf.st_ino; +#if SQLITE_THREADSAFE + if( threadsOverrideEachOthersLocks<0 ){ + testThreadLockingBehavior(fd); + } + key1.tid = threadsOverrideEachOthersLocks ? 
0 : pthread_self(); +#endif + memset(&key2, 0, sizeof(key2)); + key2.dev = statbuf.st_dev; + key2.ino = statbuf.st_ino; + pLock = (struct lockInfo*)sqlite3HashFind(&lockHash, &key1, sizeof(key1)); + if( pLock==0 ){ + struct lockInfo *pOld; + pLock = sqlite3_malloc( sizeof(*pLock) ); + if( pLock==0 ){ + rc = 1; + goto exit_findlockinfo; + } + pLock->key = key1; + pLock->nRef = 1; + pLock->cnt = 0; + pLock->locktype = 0; + pOld = sqlite3HashInsert(&lockHash, &pLock->key, sizeof(key1), pLock); + if( pOld!=0 ){ + assert( pOld==pLock ); + sqlite3_free(pLock); + rc = 1; + goto exit_findlockinfo; + } + }else{ + pLock->nRef++; + } + *ppLock = pLock; + if( ppOpen!=0 ){ + pOpen = (struct openCnt*)sqlite3HashFind(&openHash, &key2, sizeof(key2)); + if( pOpen==0 ){ + struct openCnt *pOld; + pOpen = sqlite3_malloc( sizeof(*pOpen) ); + if( pOpen==0 ){ + releaseLockInfo(pLock); + rc = 1; + goto exit_findlockinfo; + } + pOpen->key = key2; + pOpen->nRef = 1; + pOpen->nLock = 0; + pOpen->nPending = 0; + pOpen->aPending = 0; + pOld = sqlite3HashInsert(&openHash, &pOpen->key, sizeof(key2), pOpen); + if( pOld!=0 ){ + assert( pOld==pOpen ); + sqlite3_free(pOpen); + releaseLockInfo(pLock); + rc = 1; + goto exit_findlockinfo; + } + }else{ + pOpen->nRef++; + } + *ppOpen = pOpen; + } + +exit_findlockinfo: + return rc; +} + +#ifdef SQLITE_DEBUG +/* +** Helper function for printing out trace information from debugging +** binaries. This returns the string represetation of the supplied +** integer lock-type. +*/ +static const char *locktypeName(int locktype){ + switch( locktype ){ + case NO_LOCK: return "NONE"; + case SHARED_LOCK: return "SHARED"; + case RESERVED_LOCK: return "RESERVED"; + case PENDING_LOCK: return "PENDING"; + case EXCLUSIVE_LOCK: return "EXCLUSIVE"; + } + return "ERROR"; +} +#endif + +/* +** If we are currently in a different thread than the thread that the +** unixFile argument belongs to, then transfer ownership of the unixFile +** over to the current thread. +** +** A unixFile is only owned by a thread on systems where one thread is +** unable to override locks created by a different thread. RedHat9 is +** an example of such a system. +** +** Ownership transfer is only allowed if the unixFile is currently unlocked. +** If the unixFile is locked and an ownership is wrong, then return +** SQLITE_MISUSE. SQLITE_OK is returned if everything works. +*/ +#if SQLITE_THREADSAFE +static int transferOwnership(unixFile *pFile){ + int rc; + pthread_t hSelf; + if( threadsOverrideEachOthersLocks ){ + /* Ownership transfers not needed on this system */ + return SQLITE_OK; + } + hSelf = pthread_self(); + if( pthread_equal(pFile->tid, hSelf) ){ + /* We are still in the same thread */ + OSTRACE1("No-transfer, same thread\n"); + return SQLITE_OK; + } + if( pFile->locktype!=NO_LOCK ){ + /* We cannot change ownership while we are holding a lock! */ + return SQLITE_MISUSE; + } + OSTRACE4("Transfer ownership of %d from %d to %d\n", + pFile->h, pFile->tid, hSelf); + pFile->tid = hSelf; + if (pFile->pLock != NULL) { + releaseLockInfo(pFile->pLock); + rc = findLockInfo(pFile->h, &pFile->pLock, 0); + OSTRACE5("LOCK %d is now %s(%s,%d)\n", pFile->h, + locktypeName(pFile->locktype), + locktypeName(pFile->pLock->locktype), pFile->pLock->cnt); + return rc; + } else { + return SQLITE_OK; + } +} +#else + /* On single-threaded builds, ownership transfer is a no-op */ +# define transferOwnership(X) SQLITE_OK +#endif + +/* +** Seek to the offset passed as the second argument, then read cnt +** bytes into pBuf. 
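**
** (A brief illustrative aside, assuming a descriptor fd shared by several
** threads: the lseek()+read() fallback below is two system calls on one
** shared file offset, so an interleaving like
**
**   lseek(fd, off1, SEEK_SET);          // thread 1 positions the offset
**   // thread 2 runs lseek(fd, off2, SEEK_SET) here
**   read(fd, zBuf, nBuf);               // thread 1 now reads from off2
**
** is possible, whereas the single call pread(fd, zBuf, nBuf, off1) both
** avoids that race and leaves the descriptor's offset untouched; hence the
** preference for the pread() paths when USE_PREAD or USE_PREAD64 is defined
** at compile time.)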
Return the number of bytes actually read. +*/ +static int seekAndRead(unixFile *id, sqlite3_int64 offset, void *pBuf, int cnt){ + int got; + i64 newOffset; + TIMER_START; +#if defined(USE_PREAD) + got = pread(id->h, pBuf, cnt, offset); + SimulateIOError( got = -1 ); +#elif defined(USE_PREAD64) + got = pread64(id->h, pBuf, cnt, offset); + SimulateIOError( got = -1 ); +#else + newOffset = lseek(id->h, offset, SEEK_SET); + SimulateIOError( newOffset-- ); + if( newOffset!=offset ){ + return -1; + } + got = read(id->h, pBuf, cnt); +#endif + TIMER_END; + OSTRACE5("READ %-3d %5d %7lld %d\n", id->h, got, offset, TIMER_ELAPSED); + return got; +} + +/* +** Read data from a file into a buffer. Return SQLITE_OK if all +** bytes were read successfully and SQLITE_IOERR if anything goes +** wrong. +*/ +static int unixRead( + sqlite3_file *id, + void *pBuf, + int amt, + sqlite3_int64 offset +){ + int got; + assert( id ); + got = seekAndRead((unixFile*)id, offset, pBuf, amt); + if( got==amt ){ + return SQLITE_OK; + }else if( got<0 ){ + return SQLITE_IOERR_READ; + }else{ + memset(&((char*)pBuf)[got], 0, amt-got); + return SQLITE_IOERR_SHORT_READ; + } +} + +/* +** Seek to the offset in id->offset then read cnt bytes into pBuf. +** Return the number of bytes actually read. Update the offset. +*/ +static int seekAndWrite(unixFile *id, i64 offset, const void *pBuf, int cnt){ + int got; + i64 newOffset; + TIMER_START; +#if defined(USE_PREAD) + got = pwrite(id->h, pBuf, cnt, offset); +#elif defined(USE_PREAD64) + got = pwrite64(id->h, pBuf, cnt, offset); +#else + newOffset = lseek(id->h, offset, SEEK_SET); + if( newOffset!=offset ){ + return -1; + } + got = write(id->h, pBuf, cnt); +#endif + TIMER_END; + OSTRACE5("WRITE %-3d %5d %7lld %d\n", id->h, got, offset, TIMER_ELAPSED); + return got; +} + + +/* +** Write data from a buffer into a file. Return SQLITE_OK on success +** or some other error code on failure. +*/ +static int unixWrite( + sqlite3_file *id, + const void *pBuf, + int amt, + sqlite3_int64 offset +){ + int wrote = 0; + assert( id ); + assert( amt>0 ); + while( amt>0 && (wrote = seekAndWrite((unixFile*)id, offset, pBuf, amt))>0 ){ + amt -= wrote; + offset += wrote; + pBuf = &((char*)pBuf)[wrote]; + } + SimulateIOError(( wrote=(-1), amt=1 )); + SimulateDiskfullError(( wrote=0, amt=1 )); + if( amt>0 ){ + if( wrote<0 ){ + return SQLITE_IOERR_WRITE; + }else{ + return SQLITE_FULL; + } + } + return SQLITE_OK; +} + +#ifdef SQLITE_TEST +/* +** Count the number of fullsyncs and normal syncs. This is used to test +** that syncs and fullsyncs are occuring at the right times. +*/ +int sqlite3_sync_count = 0; +int sqlite3_fullsync_count = 0; +#endif + +/* +** Use the fdatasync() API only if the HAVE_FDATASYNC macro is defined. +** Otherwise use fsync() in its place. +*/ +#ifndef HAVE_FDATASYNC +# define fdatasync fsync +#endif + +/* +** Define HAVE_FULLFSYNC to 0 or 1 depending on whether or not +** the F_FULLFSYNC macro is defined. F_FULLFSYNC is currently +** only available on Mac OS X. But that could change. +*/ +#ifdef F_FULLFSYNC +# define HAVE_FULLFSYNC 1 +#else +# define HAVE_FULLFSYNC 0 +#endif + + +/* +** The fsync() system call does not work as advertised on many +** unix systems. The following procedure is an attempt to make +** it work better. +** +** The SQLITE_NO_SYNC macro disables all fsync()s. This is useful +** for testing when we want to run through the test suite quickly. 
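**
** (An illustrative aside on the "not work as advertised" remark: on some
** systems fsync() only guarantees that the data has been handed to the
** drive, not that the drive has emptied its own volatile write cache.
** Mac OS X exposes an extra fcntl() for that last step, which the routine
** below prefers whenever it is compiled in.  Sketch, assuming an open fd:
**
**   int rc;
**   #ifdef F_FULLFSYNC
**     rc = fcntl(fd, F_FULLFSYNC, 0);   // ask the drive to flush its cache
**     if( rc ) rc = fsync(fd);          // fall back if the fs rejects it
**   #else
**     rc = fsync(fd);
**   #endif
**
** The same idea appears in full_fsync() below in a slightly richer form.)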
+** You are strongly advised *not* to deploy with SQLITE_NO_SYNC +** enabled, however, since with SQLITE_NO_SYNC enabled, an OS crash +** or power failure will likely corrupt the database file. +*/ +static int full_fsync(int fd, int fullSync, int dataOnly){ + int rc; + + /* Record the number of times that we do a normal fsync() and + ** FULLSYNC. This is used during testing to verify that this procedure + ** gets called with the correct arguments. + */ +#ifdef SQLITE_TEST + if( fullSync ) sqlite3_fullsync_count++; + sqlite3_sync_count++; +#endif + + /* If we compiled with the SQLITE_NO_SYNC flag, then syncing is a + ** no-op + */ +#ifdef SQLITE_NO_SYNC + rc = SQLITE_OK; +#else + +#if HAVE_FULLFSYNC + if( fullSync ){ + rc = fcntl(fd, F_FULLFSYNC, 0); + }else{ + rc = 1; + } + /* If the FULLFSYNC failed, fall back to attempting an fsync(). + * It shouldn't be possible for fullfsync to fail on the local + * file system (on OSX), so failure indicates that FULLFSYNC + * isn't supported for this file system. So, attempt an fsync + * and (for now) ignore the overhead of a superfluous fcntl call. + * It'd be better to detect fullfsync support once and avoid + * the fcntl call every time sync is called. + */ + if( rc ) rc = fsync(fd); + +#else + if( dataOnly ){ + rc = fdatasync(fd); + }else{ + rc = fsync(fd); + } +#endif /* HAVE_FULLFSYNC */ +#endif /* defined(SQLITE_NO_SYNC) */ + + return rc; +} + +/* +** Make sure all writes to a particular file are committed to disk. +** +** If dataOnly==0 then both the file itself and its metadata (file +** size, access time, etc) are synced. If dataOnly!=0 then only the +** file data is synced. +** +** Under Unix, also make sure that the directory entry for the file +** has been created by fsync-ing the directory that contains the file. +** If we do not do this and we encounter a power failure, the directory +** entry for the journal might not exist after we reboot. The next +** SQLite to access the file will not know that the journal exists (because +** the directory entry for the journal was never created) and the transaction +** will not roll back - possibly leading to database corruption. +*/ +static int unixSync(sqlite3_file *id, int flags){ + int rc; + unixFile *pFile = (unixFile*)id; + + int isDataOnly = (flags&SQLITE_SYNC_DATAONLY); + int isFullsync = (flags&0x0F)==SQLITE_SYNC_FULL; + + /* Check that one of SQLITE_SYNC_NORMAL or FULL was passed */ + assert((flags&0x0F)==SQLITE_SYNC_NORMAL + || (flags&0x0F)==SQLITE_SYNC_FULL + ); + + assert( pFile ); + OSTRACE2("SYNC %-3d\n", pFile->h); + rc = full_fsync(pFile->h, isFullsync, isDataOnly); + SimulateIOError( rc=1 ); + if( rc ){ + return SQLITE_IOERR_FSYNC; + } + if( pFile->dirfd>=0 ){ + OSTRACE4("DIRSYNC %-3d (have_fullfsync=%d fullsync=%d)\n", pFile->dirfd, + HAVE_FULLFSYNC, isFullsync); +#ifndef SQLITE_DISABLE_DIRSYNC + /* The directory sync is only attempted if full_fsync is + ** turned off or unavailable. If a full_fsync occurred above, + ** then the directory sync is superfluous. + */ + if( (!HAVE_FULLFSYNC || !isFullsync) && full_fsync(pFile->dirfd,0,0) ){ + /* + ** We have received multiple reports of fsync() returning + ** errors when applied to directories on certain file systems. + ** A failed directory sync is not a big deal. So it seems + ** better to ignore the error. Ticket #1657 + */ + /* return SQLITE_IOERR; */ + } +#endif + close(pFile->dirfd); /* Only need to sync once, so close the directory */ + pFile->dirfd = -1; /* when we are done. 
*/ + } + return SQLITE_OK; +} + +/* +** Truncate an open file to a specified size +*/ +static int unixTruncate(sqlite3_file *id, i64 nByte){ + int rc; + assert( id ); + rc = ftruncate(((unixFile*)id)->h, (off_t)nByte); + SimulateIOError( rc=1 ); + if( rc ){ + return SQLITE_IOERR_TRUNCATE; + }else{ + return SQLITE_OK; + } +} + +/* +** Determine the current size of a file in bytes +*/ +static int unixFileSize(sqlite3_file *id, i64 *pSize){ + int rc; + struct stat buf; + assert( id ); + rc = fstat(((unixFile*)id)->h, &buf); + SimulateIOError( rc=1 ); + if( rc!=0 ){ + return SQLITE_IOERR_FSTAT; + } + *pSize = buf.st_size; + return SQLITE_OK; +} + +/* +** This routine checks if there is a RESERVED lock held on the specified +** file by this or any other process. If such a lock is held, return +** non-zero. If the file is unlocked or holds only SHARED locks, then +** return zero. +*/ +static int unixCheckReservedLock(sqlite3_file *id){ + int r = 0; + unixFile *pFile = (unixFile*)id; + + assert( pFile ); + enterMutex(); /* Because pFile->pLock is shared across threads */ + + /* Check if a thread in this process holds such a lock */ + if( pFile->pLock->locktype>SHARED_LOCK ){ + r = 1; + } + + /* Otherwise see if some other process holds it. + */ + if( !r ){ + struct flock lock; + lock.l_whence = SEEK_SET; + lock.l_start = RESERVED_BYTE; + lock.l_len = 1; + lock.l_type = F_WRLCK; + fcntl(pFile->h, F_GETLK, &lock); + if( lock.l_type!=F_UNLCK ){ + r = 1; + } + } + + leaveMutex(); + OSTRACE3("TEST WR-LOCK %d %d\n", pFile->h, r); + + return r; +} + +/* +** Lock the file with the lock specified by parameter locktype - one +** of the following: +** +** (1) SHARED_LOCK +** (2) RESERVED_LOCK +** (3) PENDING_LOCK +** (4) EXCLUSIVE_LOCK +** +** Sometimes when requesting one lock state, additional lock states +** are inserted in between. The locking might fail on one of the later +** transitions leaving the lock state different from what it started but +** still short of its goal. The following chart shows the allowed +** transitions and the inserted intermediate states: +** +** UNLOCKED -> SHARED +** SHARED -> RESERVED +** SHARED -> (PENDING) -> EXCLUSIVE +** RESERVED -> (PENDING) -> EXCLUSIVE +** PENDING -> EXCLUSIVE +** +** This routine will only increase a lock. Use the sqlite3OsUnlock() +** routine to lower a locking level. +*/ +static int unixLock(sqlite3_file *id, int locktype){ + /* The following describes the implementation of the various locks and + ** lock transitions in terms of the POSIX advisory shared and exclusive + ** lock primitives (called read-locks and write-locks below, to avoid + ** confusion with SQLite lock names). The algorithms are complicated + ** slightly in order to be compatible with windows systems simultaneously + ** accessing the same database file, in case that is ever required. + ** + ** Symbols defined in os.h indentify the 'pending byte' and the 'reserved + ** byte', each single bytes at well known offsets, and the 'shared byte + ** range', a range of 510 bytes at a well known offset. + ** + ** To obtain a SHARED lock, a read-lock is obtained on the 'pending + ** byte'. If this is successful, a random byte from the 'shared byte + ** range' is read-locked and the lock on the 'pending byte' released. + ** + ** A process may only obtain a RESERVED lock after it has a SHARED lock. + ** A RESERVED lock is implemented by grabbing a write-lock on the + ** 'reserved byte'. + ** + ** A process may only obtain a PENDING lock after it has obtained a + ** SHARED lock. 
A PENDING lock is implemented by obtaining a write-lock + ** on the 'pending byte'. This ensures that no new SHARED locks can be + ** obtained, but existing SHARED locks are allowed to persist. A process + ** does not have to obtain a RESERVED lock on the way to a PENDING lock. + ** This property is used by the algorithm for rolling back a journal file + ** after a crash. + ** + ** An EXCLUSIVE lock, obtained after a PENDING lock is held, is + ** implemented by obtaining a write-lock on the entire 'shared byte + ** range'. Since all other locks require a read-lock on one of the bytes + ** within this range, this ensures that no other locks are held on the + ** database. + ** + ** The reason a single byte cannot be used instead of the 'shared byte + ** range' is that some versions of windows do not support read-locks. By + ** locking a random byte from a range, concurrent SHARED locks may exist + ** even if the locking primitive used is always a write-lock. + */ + int rc = SQLITE_OK; + unixFile *pFile = (unixFile*)id; + struct lockInfo *pLock = pFile->pLock; + struct flock lock; + int s; + + assert( pFile ); + OSTRACE7("LOCK %d %s was %s(%s,%d) pid=%d\n", pFile->h, + locktypeName(locktype), locktypeName(pFile->locktype), + locktypeName(pLock->locktype), pLock->cnt , getpid()); + + /* If there is already a lock of this type or more restrictive on the + ** unixFile, do nothing. Don't use the end_lock: exit path, as + ** enterMutex() hasn't been called yet. + */ + if( pFile->locktype>=locktype ){ + OSTRACE3("LOCK %d %s ok (already held)\n", pFile->h, + locktypeName(locktype)); + return SQLITE_OK; + } + + /* Make sure the locking sequence is correct + */ + assert( pFile->locktype!=NO_LOCK || locktype==SHARED_LOCK ); + assert( locktype!=PENDING_LOCK ); + assert( locktype!=RESERVED_LOCK || pFile->locktype==SHARED_LOCK ); + + /* This mutex is needed because pFile->pLock is shared across threads + */ + enterMutex(); + + /* Make sure the current thread owns the pFile. + */ + rc = transferOwnership(pFile); + if( rc!=SQLITE_OK ){ + leaveMutex(); + return rc; + } + pLock = pFile->pLock; + + /* If some thread using this PID has a lock via a different unixFile* + ** handle that precludes the requested lock, return BUSY. + */ + if( (pFile->locktype!=pLock->locktype && + (pLock->locktype>=PENDING_LOCK || locktype>SHARED_LOCK)) + ){ + rc = SQLITE_BUSY; + goto end_lock; + } + + /* If a SHARED lock is requested, and some thread using this PID already + ** has a SHARED or RESERVED lock, then increment reference counts and + ** return SQLITE_OK. + */ + if( locktype==SHARED_LOCK && + (pLock->locktype==SHARED_LOCK || pLock->locktype==RESERVED_LOCK) ){ + assert( locktype==SHARED_LOCK ); + assert( pFile->locktype==0 ); + assert( pLock->cnt>0 ); + pFile->locktype = SHARED_LOCK; + pLock->cnt++; + pFile->pOpen->nLock++; + goto end_lock; + } + + lock.l_len = 1L; + + lock.l_whence = SEEK_SET; + + /* A PENDING lock is needed before acquiring a SHARED lock and before + ** acquiring an EXCLUSIVE lock. For the SHARED lock, the PENDING will + ** be released. + */ + if( locktype==SHARED_LOCK + || (locktype==EXCLUSIVE_LOCK && pFile->locktypeh, F_SETLK, &lock); + if( s==(-1) ){ + rc = (errno==EINVAL) ? SQLITE_NOLFS : SQLITE_BUSY; + goto end_lock; + } + } + + + /* If control gets to this point, then actually go ahead and make + ** operating system calls for the specified lock. 
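**
** (For orientation, an informal sketch of where those special bytes live.
** The real definitions are in os.h, not in this file, but conventionally:
**
**   PENDING_BYTE    0x40000000            // the byte at the 1GiB boundary
**   RESERVED_BYTE   PENDING_BYTE+1
**   SHARED_FIRST    PENDING_BYTE+2
**   SHARED_SIZE     510                   // length of the 'shared byte range'
**
** The page that contains these offsets is deliberately never used for data,
** so the read- and write-locks taken below never collide with ordinary page
** I/O.  A SHARED lock read-locks within [SHARED_FIRST, SHARED_FIRST+SHARED_SIZE)
** and an EXCLUSIVE lock write-locks that entire range.)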
+ */ + if( locktype==SHARED_LOCK ){ + assert( pLock->cnt==0 ); + assert( pLock->locktype==0 ); + + /* Now get the read-lock */ + lock.l_start = SHARED_FIRST; + lock.l_len = SHARED_SIZE; + s = fcntl(pFile->h, F_SETLK, &lock); + + /* Drop the temporary PENDING lock */ + lock.l_start = PENDING_BYTE; + lock.l_len = 1L; + lock.l_type = F_UNLCK; + if( fcntl(pFile->h, F_SETLK, &lock)!=0 ){ + rc = SQLITE_IOERR_UNLOCK; /* This should never happen */ + goto end_lock; + } + if( s==(-1) ){ + rc = (errno==EINVAL) ? SQLITE_NOLFS : SQLITE_BUSY; + }else{ + pFile->locktype = SHARED_LOCK; + pFile->pOpen->nLock++; + pLock->cnt = 1; + } + }else if( locktype==EXCLUSIVE_LOCK && pLock->cnt>1 ){ + /* We are trying for an exclusive lock but another thread in this + ** same process is still holding a shared lock. */ + rc = SQLITE_BUSY; + }else{ + /* The request was for a RESERVED or EXCLUSIVE lock. It is + ** assumed that there is a SHARED or greater lock on the file + ** already. + */ + assert( 0!=pFile->locktype ); + lock.l_type = F_WRLCK; + switch( locktype ){ + case RESERVED_LOCK: + lock.l_start = RESERVED_BYTE; + break; + case EXCLUSIVE_LOCK: + lock.l_start = SHARED_FIRST; + lock.l_len = SHARED_SIZE; + break; + default: + assert(0); + } + s = fcntl(pFile->h, F_SETLK, &lock); + if( s==(-1) ){ + rc = (errno==EINVAL) ? SQLITE_NOLFS : SQLITE_BUSY; + } + } + + if( rc==SQLITE_OK ){ + pFile->locktype = locktype; + pLock->locktype = locktype; + }else if( locktype==EXCLUSIVE_LOCK ){ + pFile->locktype = PENDING_LOCK; + pLock->locktype = PENDING_LOCK; + } + +end_lock: + leaveMutex(); + OSTRACE4("LOCK %d %s %s\n", pFile->h, locktypeName(locktype), + rc==SQLITE_OK ? "ok" : "failed"); + return rc; +} + +/* +** Lower the locking level on file descriptor pFile to locktype. locktype +** must be either NO_LOCK or SHARED_LOCK. +** +** If the locking level of the file descriptor is already at or below +** the requested locking level, this routine is a no-op. +*/ +static int unixUnlock(sqlite3_file *id, int locktype){ + struct lockInfo *pLock; + struct flock lock; + int rc = SQLITE_OK; + unixFile *pFile = (unixFile*)id; + + assert( pFile ); + OSTRACE7("UNLOCK %d %d was %d(%d,%d) pid=%d\n", pFile->h, locktype, + pFile->locktype, pFile->pLock->locktype, pFile->pLock->cnt, getpid()); + + assert( locktype<=SHARED_LOCK ); + if( pFile->locktype<=locktype ){ + return SQLITE_OK; + } + if( CHECK_THREADID(pFile) ){ + return SQLITE_MISUSE; + } + enterMutex(); + pLock = pFile->pLock; + assert( pLock->cnt!=0 ); + if( pFile->locktype>SHARED_LOCK ){ + assert( pLock->locktype==pFile->locktype ); + if( locktype==SHARED_LOCK ){ + lock.l_type = F_RDLCK; + lock.l_whence = SEEK_SET; + lock.l_start = SHARED_FIRST; + lock.l_len = SHARED_SIZE; + if( fcntl(pFile->h, F_SETLK, &lock)==(-1) ){ + /* This should never happen */ + rc = SQLITE_IOERR_RDLOCK; + } + } + lock.l_type = F_UNLCK; + lock.l_whence = SEEK_SET; + lock.l_start = PENDING_BYTE; + lock.l_len = 2L; assert( PENDING_BYTE+1==RESERVED_BYTE ); + if( fcntl(pFile->h, F_SETLK, &lock)!=(-1) ){ + pLock->locktype = SHARED_LOCK; + }else{ + rc = SQLITE_IOERR_UNLOCK; /* This should never happen */ + } + } + if( locktype==NO_LOCK ){ + struct openCnt *pOpen; + + /* Decrement the shared lock counter. Release the lock using an + ** OS call only when all threads in this same process have released + ** the lock. 
+ */ + pLock->cnt--; + if( pLock->cnt==0 ){ + lock.l_type = F_UNLCK; + lock.l_whence = SEEK_SET; + lock.l_start = lock.l_len = 0L; + if( fcntl(pFile->h, F_SETLK, &lock)!=(-1) ){ + pLock->locktype = NO_LOCK; + }else{ + rc = SQLITE_IOERR_UNLOCK; /* This should never happen */ + } + } + + /* Decrement the count of locks against this same file. When the + ** count reaches zero, close any other file descriptors whose close + ** was deferred because of outstanding locks. + */ + pOpen = pFile->pOpen; + pOpen->nLock--; + assert( pOpen->nLock>=0 ); + if( pOpen->nLock==0 && pOpen->nPending>0 ){ + int i; + for(i=0; inPending; i++){ + close(pOpen->aPending[i]); + } + free(pOpen->aPending); + pOpen->nPending = 0; + pOpen->aPending = 0; + } + } + leaveMutex(); + pFile->locktype = locktype; + return rc; +} + +/* +** Close a file. +*/ +static int unixClose(sqlite3_file *id){ + unixFile *pFile = (unixFile *)id; + if( !pFile ) return SQLITE_OK; + unixUnlock(id, NO_LOCK); + if( pFile->dirfd>=0 ) close(pFile->dirfd); + pFile->dirfd = -1; + enterMutex(); + + if( pFile->pOpen->nLock ){ + /* If there are outstanding locks, do not actually close the file just + ** yet because that would clear those locks. Instead, add the file + ** descriptor to pOpen->aPending. It will be automatically closed when + ** the last lock is cleared. + */ + int *aNew; + struct openCnt *pOpen = pFile->pOpen; + aNew = realloc( pOpen->aPending, (pOpen->nPending+1)*sizeof(int) ); + if( aNew==0 ){ + /* If a malloc fails, just leak the file descriptor */ + }else{ + pOpen->aPending = aNew; + pOpen->aPending[pOpen->nPending] = pFile->h; + pOpen->nPending++; + } + }else{ + /* There are no outstanding locks so we can close the file immediately */ + close(pFile->h); + } + releaseLockInfo(pFile->pLock); + releaseOpenCnt(pFile->pOpen); + + leaveMutex(); + OSTRACE2("CLOSE %-3d\n", pFile->h); + OpenCounter(-1); + memset(pFile, 0, sizeof(unixFile)); + return SQLITE_OK; +} + + +#ifdef SQLITE_ENABLE_LOCKING_STYLE +#pragma mark AFP Support + +/* + ** The afpLockingContext structure contains all afp lock specific state + */ +typedef struct afpLockingContext afpLockingContext; +struct afpLockingContext { + unsigned long long sharedLockByte; + char *filePath; +}; + +struct ByteRangeLockPB2 +{ + unsigned long long offset; /* offset to first byte to lock */ + unsigned long long length; /* nbr of bytes to lock */ + unsigned long long retRangeStart; /* nbr of 1st byte locked if successful */ + unsigned char unLockFlag; /* 1 = unlock, 0 = lock */ + unsigned char startEndFlag; /* 1=rel to end of fork, 0=rel to start */ + int fd; /* file desc to assoc this lock with */ +}; + +#define afpfsByteRangeLock2FSCTL _IOWR('z', 23, struct ByteRangeLockPB2) + +/* +** Return 0 on success, 1 on failure. To match the behavior of the +** normal posix file locking (used in unixLock for example), we should +** provide 'richer' return codes - specifically to differentiate between +** 'file busy' and 'file system error' results. +*/ +static int _AFPFSSetLock( + const char *path, + int fd, + unsigned long long offset, + unsigned long long length, + int setLockFlag +){ + struct ByteRangeLockPB2 pb; + int err; + + pb.unLockFlag = setLockFlag ? 
0 : 1; + pb.startEndFlag = 0; + pb.offset = offset; + pb.length = length; + pb.fd = fd; + OSTRACE5("AFPLOCK setting lock %s for %d in range %llx:%llx\n", + (setLockFlag?"ON":"OFF"), fd, offset, length); + err = fsctl(path, afpfsByteRangeLock2FSCTL, &pb, 0); + if ( err==-1 ) { + OSTRACE4("AFPLOCK failed to fsctl() '%s' %d %s\n", path, errno, + strerror(errno)); + return 1; /* error */ + } else { + return 0; + } +} + +/* + ** This routine checks if there is a RESERVED lock held on the specified + ** file by this or any other process. If such a lock is held, return + ** non-zero. If the file is unlocked or holds only SHARED locks, then + ** return zero. + */ +static int afpUnixCheckReservedLock(sqlite3_file *id){ + int r = 0; + unixFile *pFile = (unixFile*)id; + + assert( pFile ); + afpLockingContext *context = (afpLockingContext *) pFile->lockingContext; + + /* Check if a thread in this process holds such a lock */ + if( pFile->locktype>SHARED_LOCK ){ + r = 1; + } + + /* Otherwise see if some other process holds it. + */ + if ( !r ) { + /* lock the byte */ + int failed = _AFPFSSetLock(context->filePath, pFile->h, RESERVED_BYTE, 1,1); + if (failed) { + /* if we failed to get the lock then someone else must have it */ + r = 1; + } else { + /* if we succeeded in taking the reserved lock, unlock it to restore + ** the original state */ + _AFPFSSetLock(context->filePath, pFile->h, RESERVED_BYTE, 1, 0); + } + } + OSTRACE3("TEST WR-LOCK %d %d\n", pFile->h, r); + + return r; +} + +/* AFP-style locking following the behavior of unixLock, see the unixLock +** function comments for details of lock management. */ +static int afpUnixLock(sqlite3_file *id, int locktype) +{ + int rc = SQLITE_OK; + unixFile *pFile = (unixFile*)id; + afpLockingContext *context = (afpLockingContext *) pFile->lockingContext; + int gotPendingLock = 0; + + assert( pFile ); + OSTRACE5("LOCK %d %s was %s pid=%d\n", pFile->h, + locktypeName(locktype), locktypeName(pFile->locktype), getpid()); + /* If there is already a lock of this type or more restrictive on the + ** unixFile, do nothing. Don't use the afp_end_lock: exit path, as + ** enterMutex() hasn't been called yet. + */ + if( pFile->locktype>=locktype ){ + OSTRACE3("LOCK %d %s ok (already held)\n", pFile->h, + locktypeName(locktype)); + return SQLITE_OK; + } + + /* Make sure the locking sequence is correct + */ + assert( pFile->locktype!=NO_LOCK || locktype==SHARED_LOCK ); + assert( locktype!=PENDING_LOCK ); + assert( locktype!=RESERVED_LOCK || pFile->locktype==SHARED_LOCK ); + + /* This mutex is needed because pFile->pLock is shared across threads + */ + enterMutex(); + + /* Make sure the current thread owns the pFile. + */ + rc = transferOwnership(pFile); + if( rc!=SQLITE_OK ){ + leaveMutex(); + return rc; + } + + /* A PENDING lock is needed before acquiring a SHARED lock and before + ** acquiring an EXCLUSIVE lock. For the SHARED lock, the PENDING will + ** be released. + */ + if( locktype==SHARED_LOCK + || (locktype==EXCLUSIVE_LOCK && pFile->locktypefilePath, pFile->h, + PENDING_BYTE, 1, 1); + if (failed) { + rc = SQLITE_BUSY; + goto afp_end_lock; + } + } + + /* If control gets to this point, then actually go ahead and make + ** operating system calls for the specified lock. 
+ */ + if( locktype==SHARED_LOCK ){ + int lk, failed; + int tries = 0; + + /* Now get the read-lock */ + /* note that the quality of the randomness doesn't matter that much */ + lk = random(); + context->sharedLockByte = (lk & 0x7fffffff)%(SHARED_SIZE - 1); + failed = _AFPFSSetLock(context->filePath, pFile->h, + SHARED_FIRST+context->sharedLockByte, 1, 1); + + /* Drop the temporary PENDING lock */ + if (_AFPFSSetLock(context->filePath, pFile->h, PENDING_BYTE, 1, 0)) { + rc = SQLITE_IOERR_UNLOCK; /* This should never happen */ + goto afp_end_lock; + } + + if( failed ){ + rc = SQLITE_BUSY; + } else { + pFile->locktype = SHARED_LOCK; + } + }else{ + /* The request was for a RESERVED or EXCLUSIVE lock. It is + ** assumed that there is a SHARED or greater lock on the file + ** already. + */ + int failed = 0; + assert( 0!=pFile->locktype ); + if (locktype >= RESERVED_LOCK && pFile->locktype < RESERVED_LOCK) { + /* Acquire a RESERVED lock */ + failed = _AFPFSSetLock(context->filePath, pFile->h, RESERVED_BYTE, 1,1); + } + if (!failed && locktype == EXCLUSIVE_LOCK) { + /* Acquire an EXCLUSIVE lock */ + + /* Remove the shared lock before trying the range. we'll need to + ** reestablish the shared lock if we can't get the afpUnixUnlock + */ + if (!_AFPFSSetLock(context->filePath, pFile->h, SHARED_FIRST + + context->sharedLockByte, 1, 0)) { + /* now attemmpt to get the exclusive lock range */ + failed = _AFPFSSetLock(context->filePath, pFile->h, SHARED_FIRST, + SHARED_SIZE, 1); + if (failed && _AFPFSSetLock(context->filePath, pFile->h, SHARED_FIRST + + context->sharedLockByte, 1, 1)) { + rc = SQLITE_IOERR_RDLOCK; /* this should never happen */ + } + } else { + /* */ + rc = SQLITE_IOERR_UNLOCK; /* this should never happen */ + } + } + if( failed && rc == SQLITE_OK){ + rc = SQLITE_BUSY; + } + } + + if( rc==SQLITE_OK ){ + pFile->locktype = locktype; + }else if( locktype==EXCLUSIVE_LOCK ){ + pFile->locktype = PENDING_LOCK; + } + +afp_end_lock: + leaveMutex(); + OSTRACE4("LOCK %d %s %s\n", pFile->h, locktypeName(locktype), + rc==SQLITE_OK ? "ok" : "failed"); + return rc; +} + +/* + ** Lower the locking level on file descriptor pFile to locktype. locktype + ** must be either NO_LOCK or SHARED_LOCK. + ** + ** If the locking level of the file descriptor is already at or below + ** the requested locking level, this routine is a no-op. 
+ */ +static int afpUnixUnlock(sqlite3_file *id, int locktype) { + struct flock lock; + int rc = SQLITE_OK; + unixFile *pFile = (unixFile*)id; + afpLockingContext *context = (afpLockingContext *) pFile->lockingContext; + + assert( pFile ); + OSTRACE5("UNLOCK %d %d was %d pid=%d\n", pFile->h, locktype, + pFile->locktype, getpid()); + + assert( locktype<=SHARED_LOCK ); + if( pFile->locktype<=locktype ){ + return SQLITE_OK; + } + if( CHECK_THREADID(pFile) ){ + return SQLITE_MISUSE; + } + enterMutex(); + if( pFile->locktype>SHARED_LOCK ){ + if( locktype==SHARED_LOCK ){ + int failed = 0; + + /* unlock the exclusive range - then re-establish the shared lock */ + if (pFile->locktype==EXCLUSIVE_LOCK) { + failed = _AFPFSSetLock(context->filePath, pFile->h, SHARED_FIRST, + SHARED_SIZE, 0); + if (!failed) { + /* successfully removed the exclusive lock */ + if (_AFPFSSetLock(context->filePath, pFile->h, SHARED_FIRST+ + context->sharedLockByte, 1, 1)) { + /* failed to re-establish our shared lock */ + rc = SQLITE_IOERR_RDLOCK; /* This should never happen */ + } + } else { + /* This should never happen - failed to unlock the exclusive range */ + rc = SQLITE_IOERR_UNLOCK; + } + } + } + if (rc == SQLITE_OK && pFile->locktype>=PENDING_LOCK) { + if (_AFPFSSetLock(context->filePath, pFile->h, PENDING_BYTE, 1, 0)){ + /* failed to release the pending lock */ + rc = SQLITE_IOERR_UNLOCK; /* This should never happen */ + } + } + if (rc == SQLITE_OK && pFile->locktype>=RESERVED_LOCK) { + if (_AFPFSSetLock(context->filePath, pFile->h, RESERVED_BYTE, 1, 0)) { + /* failed to release the reserved lock */ + rc = SQLITE_IOERR_UNLOCK; /* This should never happen */ + } + } + } + if( locktype==NO_LOCK ){ + int failed = _AFPFSSetLock(context->filePath, pFile->h, + SHARED_FIRST + context->sharedLockByte, 1, 0); + if (failed) { + rc = SQLITE_IOERR_UNLOCK; /* This should never happen */ + } + } + if (rc == SQLITE_OK) + pFile->locktype = locktype; + leaveMutex(); + return rc; +} + +/* + ** Close a file & cleanup AFP specific locking context + */ +static int afpUnixClose(sqlite3_file *id) { + unixFile *pFile = (unixFile*)pId; + + if( !pFile ) return SQLITE_OK; + afpUnixUnlock(*pId, NO_LOCK); + /* free the AFP locking structure */ + if (pFile->lockingContext != NULL) { + if (((afpLockingContext *)pFile->lockingContext)->filePath != NULL) + sqlite3_free(((afpLockingContext*)pFile->lockingContext)->filePath); + sqlite3_free(pFile->lockingContext); + } + + if( pFile->dirfd>=0 ) close(pFile->dirfd); + pFile->dirfd = -1; + close(pFile->h); + OSTRACE2("CLOSE %-3d\n", pFile->h); + OpenCounter(-1); + return SQLITE_OK; +} + + +#pragma mark flock() style locking + +/* + ** The flockLockingContext is not used + */ +typedef void flockLockingContext; + +static int flockUnixCheckReservedLock(sqlite3_file *id) { + unixFile *pFile = (unixFile*)id; + + if (pFile->locktype == RESERVED_LOCK) { + return 1; /* already have a reserved lock */ + } else { + /* attempt to get the lock */ + int rc = flock(pFile->h, LOCK_EX | LOCK_NB); + if (!rc) { + /* got the lock, unlock it */ + flock(pFile->h, LOCK_UN); + return 0; /* no one has it reserved */ + } + return 1; /* someone else might have it reserved */ + } +} + +static int flockUnixLock(sqlite3_file *id, int locktype) { + unixFile *pFile = (unixFile*)id; + + /* if we already have a lock, it is exclusive. + ** Just adjust level and punt on outta here. 
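**
** (A sketch of what this locking style boils down to, assuming an already
** open descriptor fd: flock() locks the whole file at once and only the
** exclusive mode is used here, so every SQLite level at or above SHARED is
** represented by the same lock:
**
**   if( flock(fd, LOCK_EX|LOCK_NB)==0 ){
**     // acquired: stands in for SHARED, RESERVED, PENDING and EXCLUSIVE
**   }else{
**     // held elsewhere: report SQLITE_BUSY
**   }
**   flock(fd, LOCK_UN);                    // released only on NO_LOCK
**
** which is why the code below can simply record the requested level once
** the single flock() call has succeeded.)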
*/ + if (pFile->locktype > NO_LOCK) { + pFile->locktype = locktype; + return SQLITE_OK; + } + + /* grab an exclusive lock */ + int rc = flock(pFile->h, LOCK_EX | LOCK_NB); + if (rc) { + /* didn't get, must be busy */ + return SQLITE_BUSY; + } else { + /* got it, set the type and return ok */ + pFile->locktype = locktype; + return SQLITE_OK; + } +} + +static int flockUnixUnlock(sqlite3_file *id, int locktype) { + unixFile *pFile = (unixFile*)id; + + assert( locktype<=SHARED_LOCK ); + + /* no-op if possible */ + if( pFile->locktype==locktype ){ + return SQLITE_OK; + } + + /* shared can just be set because we always have an exclusive */ + if (locktype==SHARED_LOCK) { + pFile->locktype = locktype; + return SQLITE_OK; + } + + /* no, really, unlock. */ + int rc = flock(pFile->h, LOCK_UN); + if (rc) + return SQLITE_IOERR_UNLOCK; + else { + pFile->locktype = NO_LOCK; + return SQLITE_OK; + } +} + +/* + ** Close a file. + */ +static int flockUnixClose(sqlite3_file *pId) { + unixFile *pFile = (unixFile*)*pId; + + if( !pFile ) return SQLITE_OK; + flockUnixUnlock(*pId, NO_LOCK); + + if( pFile->dirfd>=0 ) close(pFile->dirfd); + pFile->dirfd = -1; + enterMutex(); + + close(pFile->h); + leaveMutex(); + OSTRACE2("CLOSE %-3d\n", pFile->h); + OpenCounter(-1); + return SQLITE_OK; +} + +#pragma mark Old-School .lock file based locking + +/* + ** The dotlockLockingContext structure contains all dotlock (.lock) lock + ** specific state + */ +typedef struct dotlockLockingContext dotlockLockingContext; +struct dotlockLockingContext { + char *lockPath; +}; + + +static int dotlockUnixCheckReservedLock(sqlite3_file *id) { + unixFile *pFile = (unixFile*)id; + dotlockLockingContext *context = + (dotlockLockingContext *) pFile->lockingContext; + + if (pFile->locktype == RESERVED_LOCK) { + return 1; /* already have a reserved lock */ + } else { + struct stat statBuf; + if (lstat(context->lockPath,&statBuf) == 0) + /* file exists, someone else has the lock */ + return 1; + else + /* file does not exist, we could have it if we want it */ + return 0; + } +} + +static int dotlockUnixLock(sqlite3_file *id, int locktype) { + unixFile *pFile = (unixFile*)id; + dotlockLockingContext *context = + (dotlockLockingContext *) pFile->lockingContext; + + /* if we already have a lock, it is exclusive. + ** Just adjust level and punt on outta here. */ + if (pFile->locktype > NO_LOCK) { + pFile->locktype = locktype; + + /* Always update the timestamp on the old file */ + utimes(context->lockPath,NULL); + return SQLITE_OK; + } + + /* check to see if lock file already exists */ + struct stat statBuf; + if (lstat(context->lockPath,&statBuf) == 0){ + return SQLITE_BUSY; /* it does, busy */ + } + + /* grab an exclusive lock */ + int fd = open(context->lockPath,O_RDONLY|O_CREAT|O_EXCL,0600); + if (fd < 0) { + /* failed to open/create the file, someone else may have stolen the lock */ + return SQLITE_BUSY; + } + close(fd); + + /* got it, set the type and return ok */ + pFile->locktype = locktype; + return SQLITE_OK; +} + +static int dotlockUnixUnlock(sqlite3_file *id, int locktype) { + unixFile *pFile = (unixFile*)id; + dotlockLockingContext *context = + (dotlockLockingContext *) pFile->lockingContext; + + assert( locktype<=SHARED_LOCK ); + + /* no-op if possible */ + if( pFile->locktype==locktype ){ + return SQLITE_OK; + } + + /* shared can just be set because we always have an exclusive */ + if (locktype==SHARED_LOCK) { + pFile->locktype = locktype; + return SQLITE_OK; + } + + /* no, really, unlock. 
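**
** (For context, the whole dot-file protocol in miniature: the lock is "held"
** while the side file exists.  It is taken atomically with something like
**
**   int fd = open("demo.db.lock", O_RDONLY|O_CREAT|O_EXCL, 0600);  // fails if held
**   if( fd>=0 ) close(fd);                                         // we now own it
**
** where "demo.db.lock" stands in for context->lockPath, and it is released
** by the unlink() just below.)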
*/ + unlink(context->lockPath); + pFile->locktype = NO_LOCK; + return SQLITE_OK; +} + +/* + ** Close a file. + */ +static int dotlockUnixClose(sqlite3_file *id) { + unixFile *pFile = (unixFile*)id; + + if( !pFile ) return SQLITE_OK; + dotlockUnixUnlock(*pId, NO_LOCK); + /* free the dotlock locking structure */ + if (pFile->lockingContext != NULL) { + if (((dotlockLockingContext *)pFile->lockingContext)->lockPath != NULL) + sqlite3_free( ( (dotlockLockingContext *) + pFile->lockingContext)->lockPath); + sqlite3_free(pFile->lockingContext); + } + + if( pFile->dirfd>=0 ) close(pFile->dirfd); + pFile->dirfd = -1; + enterMutex(); + + close(pFile->h); + + leaveMutex(); + OSTRACE2("CLOSE %-3d\n", pFile->h); + OpenCounter(-1); + return SQLITE_OK; +} + + +#pragma mark No locking + +/* + ** The nolockLockingContext is void + */ +typedef void nolockLockingContext; + +static int nolockUnixCheckReservedLock(sqlite3_file *id) { + return 0; +} + +static int nolockUnixLock(sqlite3_file *id, int locktype) { + return SQLITE_OK; +} + +static int nolockUnixUnlock(sqlite3_file *id, int locktype) { + return SQLITE_OK; +} + +/* + ** Close a file. + */ +static int nolockUnixClose(sqlite3_file *id) { + unixFile *pFile = (unixFile*)id; + + if( !pFile ) return SQLITE_OK; + if( pFile->dirfd>=0 ) close(pFile->dirfd); + pFile->dirfd = -1; + enterMutex(); + + close(pFile->h); + + leaveMutex(); + OSTRACE2("CLOSE %-3d\n", pFile->h); + OpenCounter(-1); + return SQLITE_OK; +} + +#endif /* SQLITE_ENABLE_LOCKING_STYLE */ + + +/* +** Information and control of an open file handle. +*/ +static int unixFileControl(sqlite3_file *id, int op, void *pArg){ + switch( op ){ + case SQLITE_FCNTL_LOCKSTATE: { + *(int*)pArg = ((unixFile*)id)->locktype; + return SQLITE_OK; + } + } + return SQLITE_ERROR; +} + +/* +** Return the sector size in bytes of the underlying block device for +** the specified file. This is almost always 512 bytes, but may be +** larger for some devices. +** +** SQLite code assumes this function cannot fail. It also assumes that +** if two files are created in the same file-system directory (i.e. +** a database and it's journal file) that the sector size will be the +** same for both. +*/ +static int unixSectorSize(sqlite3_file *id){ + return SQLITE_DEFAULT_SECTOR_SIZE; +} + +/* +** Return the device characteristics for the file. This is always 0. +*/ +static int unixDeviceCharacteristics(sqlite3_file *id){ + return 0; +} + +/* +** This vector defines all the methods that can operate on an sqlite3_file +** for unix. +*/ +static const sqlite3_io_methods sqlite3UnixIoMethod = { + 1, /* iVersion */ + unixClose, + unixRead, + unixWrite, + unixTruncate, + unixSync, + unixFileSize, + unixLock, + unixUnlock, + unixCheckReservedLock, + unixFileControl, + unixSectorSize, + unixDeviceCharacteristics +}; + +#ifdef SQLITE_ENABLE_LOCKING_STYLE +/* +** This vector defines all the methods that can operate on an sqlite3_file +** for unix with AFP style file locking. +*/ +static const sqlite3_io_methods sqlite3AFPLockingUnixIoMethod = { + 1, /* iVersion */ + unixClose, + unixRead, + unixWrite, + unixTruncate, + unixSync, + unixFileSize, + afpUnixLock, + afpUnixUnlock, + afpUnixCheckReservedLock, + unixFileControl, + unixSectorSize, + unixDeviceCharacteristics +}; + +/* +** This vector defines all the methods that can operate on an sqlite3_file +** for unix with flock() style file locking. 
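**
** (Context for these method vectors, sketched from memory of the generic
** layer rather than copied from it: an sqlite3_file begins with a pointer to
** one of these sqlite3_io_methods tables, and the portable pager code only
** ever reaches the platform layer through that pointer, roughly
**
**   int sqlite3OsLock(sqlite3_file *id, int lockType){
**     return id->pMethods->xLock(id, lockType);
**   }
**
** so fillInUnixFile() further down chooses a file's locking behaviour simply
** by installing the matching vector.)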
+*/ +static const sqlite3_io_methods sqlite3FlockLockingUnixIoMethod = { + 1, /* iVersion */ + flockUnixClose, + unixRead, + unixWrite, + unixTruncate, + unixSync, + unixFileSize, + flockUnixLock, + flockUnixUnlock, + flockUnixCheckReservedLock, + unixFileControl, + unixSectorSize, + unixDeviceCharacteristics +}; + +/* +** This vector defines all the methods that can operate on an sqlite3_file +** for unix with dotlock style file locking. +*/ +static const sqlite3_io_methods sqlite3DotlockLockingUnixIoMethod = { + 1, /* iVersion */ + dotlockUnixClose, + unixRead, + unixWrite, + unixTruncate, + unixSync, + unixFileSize, + dotlockUnixLock, + dotlockUnixUnlock, + dotlockUnixCheckReservedLock, + unixFileControl, + unixSectorSize, + unixDeviceCharacteristics +}; + +/* +** This vector defines all the methods that can operate on an sqlite3_file +** for unix with dotlock style file locking. +*/ +static const sqlite3_io_methods sqlite3NolockLockingUnixIoMethod = { + 1, /* iVersion */ + nolockUnixClose, + unixRead, + unixWrite, + unixTruncate, + unixSync, + unixFileSize, + nolockUnixLock, + nolockUnixUnlock, + nolockUnixCheckReservedLock, + unixFileControl, + unixSectorSize, + unixDeviceCharacteristics +}; + +#endif /* SQLITE_ENABLE_LOCKING_STYLE */ + +/* +** Allocate memory for a new unixFile and initialize that unixFile. +** Write a pointer to the new unixFile into *pId. +** If we run out of memory, close the file and return an error. +*/ +#ifdef SQLITE_ENABLE_LOCKING_STYLE +/* +** When locking extensions are enabled, the filepath and locking style +** are needed to determine the unixFile pMethod to use for locking operations. +** The locking-style specific lockingContext data structure is created +** and assigned here also. +*/ +static int fillInUnixFile( + int h, /* Open file descriptor of file being opened */ + int dirfd, /* Directory file descriptor */ + sqlite3_file *pId, /* Write completed initialization here */ + const char *zFilename, /* Name of the file being opened */ +){ + sqlite3LockingStyle lockingStyle; + unixFile *pNew = (unixFile *)pId; + int rc; + + memset(pNew, 0, sizeof(unixFile)); + lockingStyle = sqlite3DetectLockingStyle(zFilename, h); + if ( lockingStyle == posixLockingStyle ) { + enterMutex(); + rc = findLockInfo(h, &pNew->pLock, &pNew->pOpen); + leaveMutex(); + if( rc ){ + close(h); + unlink(zFilename); + return SQLITE_NOMEM; + } + } else { + /* pLock and pOpen are only used for posix advisory locking */ + pNew->pLock = NULL; + pNew->pOpen = NULL; + } + pNew->dirfd = -1; + pNew->h = h; + SET_THREADID(pNew); + pNew = sqlite3_malloc( sizeof(unixFile) ); + if( pNew==0 ){ + close(h); + enterMutex(); + releaseLockInfo(pNew->pLock); + releaseOpenCnt(pNew->pOpen); + leaveMutex(); + return SQLITE_NOMEM; + }else{ + switch(lockingStyle) { + case afpLockingStyle: { + /* afp locking uses the file path so it needs to be included in + ** the afpLockingContext */ + int nFilename; + pNew->pMethod = &sqlite3AFPLockingUnixIoMethod; + pNew->lockingContext = + sqlite3_malloc(sizeof(afpLockingContext)); + nFilename = strlen(zFilename)+1; + ((afpLockingContext *)pNew->lockingContext)->filePath = + sqlite3_malloc(nFilename); + memcpy(((afpLockingContext *)pNew->lockingContext)->filePath, + zFilename, nFilename); + srandomdev(); + break; + } + case flockLockingStyle: + /* flock locking doesn't need additional lockingContext information */ + pNew->pMethod = &sqlite3FlockLockingUnixIoMethod; + break; + case dotlockLockingStyle: { + /* dotlock locking uses the file path so it needs to be included in 
+ ** the dotlockLockingContext */ + int nFilename; + pNew->pMethod = &sqlite3DotlockLockingUnixIoMethod; + pNew->lockingContext = sqlite3_malloc( + sizeof(dotlockLockingContext)); + nFilename = strlen(zFilename) + 6; + ((dotlockLockingContext *)pNew->lockingContext)->lockPath = + sqlite3_malloc( nFilename ); + sqlite3_snprintf(nFilename, + ((dotlockLockingContext *)pNew->lockingContext)->lockPath, + "%s.lock", zFilename); + break; + } + case posixLockingStyle: + /* posix locking doesn't need additional lockingContext information */ + pNew->pMethod = &sqlite3UnixIoMethod; + break; + case noLockingStyle: + case unsupportedLockingStyle: + default: + pNew->pMethod = &sqlite3NolockLockingUnixIoMethod; + } + OpenCounter(+1); + return SQLITE_OK; + } +} +#else /* SQLITE_ENABLE_LOCKING_STYLE */ +static int fillInUnixFile( + int h, /* Open file descriptor on file being opened */ + int dirfd, + sqlite3_file *pId, /* Write to the unixFile structure here */ + const char *zFilename /* Name of the file being opened */ +){ + unixFile *pNew = (unixFile *)pId; + int rc; + +#ifdef FD_CLOEXEC + fcntl(h, F_SETFD, fcntl(h, F_GETFD, 0) | FD_CLOEXEC); +#endif + + enterMutex(); + rc = findLockInfo(h, &pNew->pLock, &pNew->pOpen); + leaveMutex(); + if( rc ){ + close(h); + return SQLITE_NOMEM; + } + + OSTRACE3("OPEN %-3d %s\n", h, zFilename); + pNew->dirfd = -1; + pNew->h = h; + pNew->dirfd = dirfd; + SET_THREADID(pNew); + + pNew->pMethod = &sqlite3UnixIoMethod; + OpenCounter(+1); + return SQLITE_OK; +} +#endif /* SQLITE_ENABLE_LOCKING_STYLE */ + +/* +** Open a file descriptor to the directory containing file zFilename. +** If successful, *pFd is set to the opened file descriptor and +** SQLITE_OK is returned. If an error occurs, either SQLITE_NOMEM +** or SQLITE_CANTOPEN is returned and *pFd is set to an undefined +** value. +** +** If SQLITE_OK is returned, the caller is responsible for closing +** the file descriptor *pFd using close(). +*/ +static int openDirectory(const char *zFilename, int *pFd){ + int ii; + int fd = -1; + char zDirname[MAX_PATHNAME+1]; + + sqlite3_snprintf(MAX_PATHNAME, zDirname, "%s", zFilename); + for(ii=strlen(zDirname); ii>=0 && zDirname[ii]!='/'; ii--); + if( ii>0 ){ + zDirname[ii] = '\0'; + fd = open(zDirname, O_RDONLY|O_BINARY, 0); + if( fd>=0 ){ +#ifdef FD_CLOEXEC + fcntl(fd, F_SETFD, fcntl(fd, F_GETFD, 0) | FD_CLOEXEC); +#endif + OSTRACE3("OPENDIR %-3d %s\n", fd, zDirname); + } + } + *pFd = fd; + return (fd>=0?SQLITE_OK:SQLITE_CANTOPEN); +} + +/* +** Open the file zPath. +** +** Previously, the SQLite OS layer used three functions in place of this +** one: +** +** sqlite3OsOpenReadWrite(); +** sqlite3OsOpenReadOnly(); +** sqlite3OsOpenExclusive(); +** +** These calls correspond to the following combinations of flags: +** +** ReadWrite() -> (READWRITE | CREATE) +** ReadOnly() -> (READONLY) +** OpenExclusive() -> (READWRITE | CREATE | EXCLUSIVE) +** +** The old OpenExclusive() accepted a boolean argument - "delFlag". If +** true, the file was configured to be automatically deleted when the +** file handle closed. To achieve the same effect using this new +** interface, add the DELETEONCLOSE flag to those specified above for +** OpenExclusive(). 
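**
** (A short illustrative note on how DELETEONCLOSE is honoured below: the
** file is simply unlink()ed right after a successful open().  On unix an
** unlinked file continues to exist until its last descriptor is closed, so
** the handle behaves like an anonymous temporary file.  Hypothetical sketch,
** with a made-up path:
**
**   int fd = open("/tmp/scratch-demo", O_RDWR|O_CREAT|O_EXCL, 0600);
**   unlink("/tmp/scratch-demo");      // the name is gone immediately
**   write(fd, "scratch", 7);          // the storage is still usable
**   close(fd);                        // and is reclaimed only here
**
** No separate cleanup pass is needed even if the process crashes.)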
+*/ +static int unixOpen( + sqlite3_vfs *pVfs, + const char *zPath, + sqlite3_file *pFile, + int flags, + int *pOutFlags +){ + int fd = 0; /* File descriptor returned by open() */ + int dirfd = -1; /* Directory file descriptor */ + int oflags = 0; /* Flags to pass to open() */ + int eType = flags&0xFFFFFF00; /* Type of file to open */ + + int isExclusive = (flags & SQLITE_OPEN_EXCLUSIVE); + int isDelete = (flags & SQLITE_OPEN_DELETEONCLOSE); + int isCreate = (flags & SQLITE_OPEN_CREATE); + int isReadonly = (flags & SQLITE_OPEN_READONLY); + int isReadWrite = (flags & SQLITE_OPEN_READWRITE); + + /* If creating a master or main-file journal, this function will open + ** a file-descriptor on the directory too. The first time unixSync() + ** is called the directory file descriptor will be fsync()ed and close()d. + */ + int isOpenDirectory = (isCreate && + (eType==SQLITE_OPEN_MASTER_JOURNAL || eType==SQLITE_OPEN_MAIN_JOURNAL) + ); + + /* Check the following statements are true: + ** + ** (a) Exactly one of the READWRITE and READONLY flags must be set, and + ** (b) if CREATE is set, then READWRITE must also be set, and + ** (c) if EXCLUSIVE is set, then CREATE must also be set. + ** (d) if DELETEONCLOSE is set, then CREATE must also be set. + */ + assert((isReadonly==0 || isReadWrite==0) && (isReadWrite || isReadonly)); + assert(isCreate==0 || isReadWrite); + assert(isExclusive==0 || isCreate); + assert(isDelete==0 || isCreate); + + + /* The main DB, main journal, and master journal are never automatically + ** deleted + */ + assert( eType!=SQLITE_OPEN_MAIN_DB || !isDelete ); + assert( eType!=SQLITE_OPEN_MAIN_JOURNAL || !isDelete ); + assert( eType!=SQLITE_OPEN_MASTER_JOURNAL || !isDelete ); + + /* Assert that the upper layer has set one of the "file-type" flags. */ + assert( eType==SQLITE_OPEN_MAIN_DB || eType==SQLITE_OPEN_TEMP_DB + || eType==SQLITE_OPEN_MAIN_JOURNAL || eType==SQLITE_OPEN_TEMP_JOURNAL + || eType==SQLITE_OPEN_SUBJOURNAL || eType==SQLITE_OPEN_MASTER_JOURNAL + || eType==SQLITE_OPEN_TRANSIENT_DB + ); + + if( isReadonly ) oflags |= O_RDONLY; + if( isReadWrite ) oflags |= O_RDWR; + if( isCreate ) oflags |= O_CREAT; + if( isExclusive ) oflags |= (O_EXCL|O_NOFOLLOW); + oflags |= (O_LARGEFILE|O_BINARY); + + memset(pFile, 0, sizeof(unixFile)); + fd = open(zPath, oflags, isDelete?0600:SQLITE_DEFAULT_FILE_PERMISSIONS); + if( fd<0 && errno!=EISDIR && isReadWrite && !isExclusive ){ + /* Failed to open the file for read/write access. Try read-only. */ + flags &= ~(SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE); + flags |= SQLITE_OPEN_READONLY; + return unixOpen(pVfs, zPath, pFile, flags, pOutFlags); + } + if( fd<0 ){ + return SQLITE_CANTOPEN; + } + if( isDelete ){ + unlink(zPath); + } + if( pOutFlags ){ + *pOutFlags = flags; + } + + assert(fd!=0); + if( isOpenDirectory ){ + int rc = openDirectory(zPath, &dirfd); + if( rc!=SQLITE_OK ){ + close(fd); + return rc; + } + } + return fillInUnixFile(fd, dirfd, pFile, zPath); +} + +/* +** Delete the file at zPath. If the dirSync argument is true, fsync() +** the directory after deleting the file. +*/ +static int unixDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){ + int rc = SQLITE_OK; + SimulateIOError(return SQLITE_IOERR_DELETE); + unlink(zPath); + if( dirSync ){ + int fd; + rc = openDirectory(zPath, &fd); + if( rc==SQLITE_OK ){ + if( fsync(fd) ){ + rc = SQLITE_IOERR_DIR_FSYNC; + } + close(fd); + } + } + return rc; +} + +/* +** Test the existance of or access permissions of file zPath. 
The +** test performed depends on the value of flags: +** +** SQLITE_ACCESS_EXISTS: Return 1 if the file exists +** SQLITE_ACCESS_READWRITE: Return 1 if the file is read and writable. +** SQLITE_ACCESS_READONLY: Return 1 if the file is readable. +** +** Otherwise return 0. +*/ +static int unixAccess(sqlite3_vfs *pVfs, const char *zPath, int flags){ + int amode = 0; + switch( flags ){ + case SQLITE_ACCESS_EXISTS: + amode = F_OK; + break; + case SQLITE_ACCESS_READWRITE: + amode = W_OK|R_OK; + break; + case SQLITE_ACCESS_READ: + amode = R_OK; + break; + + default: + assert(!"Invalid flags argument"); + } + return (access(zPath, amode)==0); +} + +/* +** Create a temporary file name in zBuf. zBuf must be allocated +** by the calling process and must be big enough to hold at least +** pVfs->mxPathname bytes. +*/ +static int unixGetTempname(sqlite3_vfs *pVfs, int nBuf, char *zBuf){ + static const char *azDirs[] = { + 0, + "/var/tmp", + "/usr/tmp", + "/tmp", + ".", + }; + static const unsigned char zChars[] = + "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "0123456789"; + int i, j; + struct stat buf; + const char *zDir = "."; + + /* It's odd to simulate an io-error here, but really this is just + ** using the io-error infrastructure to test that SQLite handles this + ** function failing. + */ + SimulateIOError( return SQLITE_ERROR ); + + azDirs[0] = sqlite3_temp_directory; + for(i=0; imxPathname==MAX_PATHNAME ); + assert( nBuf>=MAX_PATHNAME ); + sqlite3_snprintf(MAX_PATHNAME-17, zBuf, "%s/"SQLITE_TEMP_FILE_PREFIX, zDir); + j = strlen(zBuf); + sqlite3Randomness(15, &zBuf[j]); + for(i=0; i<15; i++, j++){ + zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ]; + } + zBuf[j] = 0; + }while( access(zBuf,0)==0 ); + return SQLITE_OK; +} + + +/* +** Turn a relative pathname into a full pathname. The relative path +** is stored as a nul-terminated string in the buffer pointed to by +** zPath. +** +** zOut points to a buffer of at least sqlite3_vfs.mxPathname bytes +** (in this case, MAX_PATHNAME bytes). The full-path is written to +** this buffer before returning. +*/ +static int unixFullPathname( + sqlite3_vfs *pVfs, /* Pointer to vfs object */ + const char *zPath, /* Possibly relative input path */ + int nOut, /* Size of output buffer in bytes */ + char *zOut /* Output buffer */ +){ + + /* It's odd to simulate an io-error here, but really this is just + ** using the io-error infrastructure to test that SQLite handles this + ** function failing. This function could fail if, for example, the + ** current working directly has been unlinked. + */ + SimulateIOError( return SQLITE_ERROR ); + + assert( pVfs->mxPathname==MAX_PATHNAME ); + zOut[MAX_PATHNAME-1] = '\0'; + if( zPath[0]=='/' ){ + sqlite3_snprintf(MAX_PATHNAME, zOut, "%s", zPath); + }else{ + int nCwd; + if( getcwd(zOut, MAX_PATHNAME-1)==0 ){ + return SQLITE_CANTOPEN; + } + nCwd = strlen(zOut); + sqlite3_snprintf(MAX_PATHNAME-nCwd, &zOut[nCwd], "/%s", zPath); + } + return SQLITE_OK; + +#if 0 + /* + ** Remove "/./" path elements and convert "/A/./" path elements + ** to just "/". + */ + if( zFull ){ + int i, j; + for(i=j=0; zFull[i]; i++){ + if( zFull[i]=='/' ){ + if( zFull[i+1]=='/' ) continue; + if( zFull[i+1]=='.' && zFull[i+2]=='/' ){ + i += 1; + continue; + } + if( zFull[i+1]=='.' && zFull[i+2]=='.' 
&& zFull[i+3]=='/' ){ + while( j>0 && zFull[j-1]!='/' ){ j--; } + i += 3; + continue; + } + } + zFull[j++] = zFull[i]; + } + zFull[j] = 0; + } +#endif +} + + +#ifndef SQLITE_OMIT_LOAD_EXTENSION +/* +** Interfaces for opening a shared library, finding entry points +** within the shared library, and closing the shared library. +*/ +#include +static void *unixDlOpen(sqlite3_vfs *pVfs, const char *zFilename){ + return dlopen(zFilename, RTLD_NOW | RTLD_GLOBAL); +} + +/* +** SQLite calls this function immediately after a call to unixDlSym() or +** unixDlOpen() fails (returns a null pointer). If a more detailed error +** message is available, it is written to zBufOut. If no error message +** is available, zBufOut is left unmodified and SQLite uses a default +** error message. +*/ +static void unixDlError(sqlite3_vfs *pVfs, int nBuf, char *zBufOut){ + char *zErr; + enterMutex(); + zErr = dlerror(); + if( zErr ){ + sqlite3_snprintf(nBuf, zBufOut, "%s", zErr); + } + leaveMutex(); +} +static void *unixDlSym(sqlite3_vfs *pVfs, void *pHandle, const char *zSymbol){ + return dlsym(pHandle, zSymbol); +} +static void unixDlClose(sqlite3_vfs *pVfs, void *pHandle){ + dlclose(pHandle); +} +#else /* if SQLITE_OMIT_LOAD_EXTENSION is defined: */ + #define unixDlOpen 0 + #define unixDlError 0 + #define unixDlSym 0 + #define unixDlClose 0 +#endif + +/* +** Write nBuf bytes of random data to the supplied buffer zBuf. +*/ +static int unixRandomness(sqlite3_vfs *pVfs, int nBuf, char *zBuf){ + + assert(nBuf>=(sizeof(time_t)+sizeof(int))); + + /* We have to initialize zBuf to prevent valgrind from reporting + ** errors. The reports issued by valgrind are incorrect - we would + ** prefer that the randomness be increased by making use of the + ** uninitialized space in zBuf - but valgrind errors tend to worry + ** some users. Rather than argue, it seems easier just to initialize + ** the whole array and silence valgrind, even if that means less randomness + ** in the random seed. + ** + ** When testing, initializing zBuf[] to zero is all we do. That means + ** that we always use the same random number sequence. This makes the + ** tests repeatable. + */ + memset(zBuf, 0, nBuf); +#if !defined(SQLITE_TEST) + { + int pid, fd; + fd = open("/dev/urandom", O_RDONLY); + if( fd<0 ){ + time_t t; + time(&t); + memcpy(zBuf, &t, sizeof(t)); + pid = getpid(); + memcpy(&zBuf[sizeof(t)], &pid, sizeof(pid)); + }else{ + read(fd, zBuf, nBuf); + close(fd); + } + } +#endif + return SQLITE_OK; +} + + +/* +** Sleep for a little while. Return the amount of time slept. +** The argument is the number of microseconds we want to sleep. +** The return value is the number of microseconds of sleep actually +** requested from the underlying operating system, a number which +** might be greater than or equal to the argument, but not less +** than the argument. +*/ +static int unixSleep(sqlite3_vfs *pVfs, int microseconds){ +#if defined(HAVE_USLEEP) && HAVE_USLEEP + usleep(microseconds); + return microseconds; +#else + int seconds = (microseconds+999999)/1000000; + sleep(seconds); + return seconds*1000000; +#endif +} + +/* +** The following variable, if set to a non-zero value, becomes the result +** returned from sqlite3OsCurrentTime(). This is used for testing. +*/ +#ifdef SQLITE_TEST +int sqlite3_current_time = 0; +#endif + +/* +** Find the current time (in Universal Coordinated Time). Write the +** current time and date as a Julian Day number into *prNow and +** return 0. Return 1 if the time and date cannot be found. 
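+**
+** The conversion below relies on two fixed facts: the unix epoch
+** (1970-01-01 00:00:00 UTC) falls on Julian Day 2440587.5, and a day
+** contains 86400 seconds.  So for a unix timestamp t:
+**
+**     julian day = 2440587.5 + t/86400.0
+**
+** When gettimeofday() is available, tv_usec/86400000000.0 is added
+** for sub-second resolution.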
+*/ +static int unixCurrentTime(sqlite3_vfs *pVfs, double *prNow){ +#ifdef NO_GETTOD + time_t t; + time(&t); + *prNow = t/86400.0 + 2440587.5; +#else + struct timeval sNow; + gettimeofday(&sNow, 0); + *prNow = 2440587.5 + sNow.tv_sec/86400.0 + sNow.tv_usec/86400000000.0; +#endif +#ifdef SQLITE_TEST + if( sqlite3_current_time ){ + *prNow = sqlite3_current_time/86400.0 + 2440587.5; + } +#endif + return 0; +} + +/* +** Return a pointer to the sqlite3DefaultVfs structure. We use +** a function rather than give the structure global scope because +** some compilers (MSVC) do not allow forward declarations of +** initialized structures. +*/ +sqlite3_vfs *sqlite3OsDefaultVfs(void){ + static sqlite3_vfs unixVfs = { + 1, /* iVersion */ + sizeof(unixFile), /* szOsFile */ + MAX_PATHNAME, /* mxPathname */ + 0, /* pNext */ + "unix", /* zName */ + 0, /* pAppData */ + + unixOpen, /* xOpen */ + unixDelete, /* xDelete */ + unixAccess, /* xAccess */ + unixGetTempname, /* xGetTempName */ + unixFullPathname, /* xFullPathname */ + unixDlOpen, /* xDlOpen */ + unixDlError, /* xDlError */ + unixDlSym, /* xDlSym */ + unixDlClose, /* xDlClose */ + unixRandomness, /* xRandomness */ + unixSleep, /* xSleep */ + unixCurrentTime /* xCurrentTime */ + }; + + return &unixVfs; +} + +#endif /* OS_UNIX */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/os_win.c b/libraries/sqlite/unix/sqlite-3.5.1/src/os_win.c new file mode 100644 index 0000000000..bcb1c6ec2c --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/os_win.c @@ -0,0 +1,1545 @@ +/* +** 2004 May 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains code that is specific to windows. +*/ +#include "sqliteInt.h" +#if OS_WIN /* This file is used for windows only */ + + +/* +** A Note About Memory Allocation: +** +** This driver uses malloc()/free() directly rather than going through +** the SQLite-wrappers sqlite3_malloc()/sqlite3_free(). Those wrappers +** are designed for use on embedded systems where memory is scarce and +** malloc failures happen frequently. Win32 does not typically run on +** embedded systems, and when it does the developers normally have bigger +** problems to worry about than running out of memory. So there is not +** a compelling need to use the wrappers. +** +** But there is a good reason to not use the wrappers. If we use the +** wrappers then we will get simulated malloc() failures within this +** driver. And that causes all kinds of problems for our tests. We +** could enhance SQLite to deal with simulated malloc failures within +** the OS driver, but the code to deal with those failure would not +** be exercised on Linux (which does not need to malloc() in the driver) +** and so we would have difficulty writing coverage tests for that +** code. Better to leave the code out, we think. +** +** The point of this discussion is as follows: When creating a new +** OS layer for an embedded system, if you use this file as an example, +** avoid the use of malloc()/free(). Those routines work ok on windows +** desktops but not so well in embedded systems. +*/ + +#include + +#ifdef __CYGWIN__ +# include +#endif + +/* +** Macros used to determine whether or not to use threads. 
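+**
+** SQLITE_W32_THREADS is defined below only when the library is being
+** compiled with THREADSAFE set to a true value.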
+*/ +#if defined(THREADSAFE) && THREADSAFE +# define SQLITE_W32_THREADS 1 +#endif + +/* +** Include code that is common to all os_*.c files +*/ +#include "os_common.h" + +/* +** Determine if we are dealing with WindowsCE - which has a much +** reduced API. +*/ +#if defined(_WIN32_WCE) +# define OS_WINCE 1 +# define AreFileApisANSI() 1 +#else +# define OS_WINCE 0 +#endif + +/* +** WinCE lacks native support for file locking so we have to fake it +** with some code of our own. +*/ +#if OS_WINCE +typedef struct winceLock { + int nReaders; /* Number of reader locks obtained */ + BOOL bPending; /* Indicates a pending lock has been obtained */ + BOOL bReserved; /* Indicates a reserved lock has been obtained */ + BOOL bExclusive; /* Indicates an exclusive lock has been obtained */ +} winceLock; +#endif + +/* +** The winFile structure is a subclass of sqlite3_file* specific to the win32 +** portability layer. +*/ +typedef struct winFile winFile; +struct winFile { + const sqlite3_io_methods *pMethod;/* Must be first */ + HANDLE h; /* Handle for accessing the file */ + unsigned char locktype; /* Type of lock currently held on this file */ + short sharedLockByte; /* Randomly chosen byte used as a shared lock */ +#if OS_WINCE + WCHAR *zDeleteOnClose; /* Name of file to delete when closing */ + HANDLE hMutex; /* Mutex used to control access to shared lock */ + HANDLE hShared; /* Shared memory segment used for locking */ + winceLock local; /* Locks obtained by this instance of winFile */ + winceLock *shared; /* Global shared lock memory for the file */ +#endif +}; + + +/* +** The following variable is (normally) set once and never changes +** thereafter. It records whether the operating system is Win95 +** or WinNT. +** +** 0: Operating system unknown. +** 1: Operating system is Win95. +** 2: Operating system is WinNT. +** +** In order to facilitate testing on a WinNT system, the test fixture +** can manually set this value to 1 to emulate Win98 behavior. +*/ +#ifdef SQLITE_TEST +int sqlite3_os_type = 0; +#else +static int sqlite3_os_type = 0; +#endif + +/* +** Return true (non-zero) if we are running under WinNT, Win2K, WinXP, +** or WinCE. Return false (zero) for Win95, Win98, or WinME. +** +** Here is an interesting observation: Win95, Win98, and WinME lack +** the LockFileEx() API. But we can still statically link against that +** API as long as we don't call it win running Win95/98/ME. A call to +** this routine is used to determine if the host is Win95/98/ME or +** WinNT/2K/XP so that we will know whether or not we can safely call +** the LockFileEx() API. +*/ +#if OS_WINCE +# define isNT() (1) +#else + static int isNT(void){ + if( sqlite3_os_type==0 ){ + OSVERSIONINFO sInfo; + sInfo.dwOSVersionInfoSize = sizeof(sInfo); + GetVersionEx(&sInfo); + sqlite3_os_type = sInfo.dwPlatformId==VER_PLATFORM_WIN32_NT ? 2 : 1; + } + return sqlite3_os_type==2; + } +#endif /* OS_WINCE */ + +/* +** Convert a UTF-8 string to microsoft unicode (UTF-16?). +** +** Space to hold the returned string is obtained from malloc. +*/ +static WCHAR *utf8ToUnicode(const char *zFilename){ + int nChar; + WCHAR *zWideFilename; + + nChar = MultiByteToWideChar(CP_UTF8, 0, zFilename, -1, NULL, 0); + zWideFilename = malloc( nChar*sizeof(zWideFilename[0]) ); + if( zWideFilename==0 ){ + return 0; + } + nChar = MultiByteToWideChar(CP_UTF8, 0, zFilename, -1, zWideFilename, nChar); + if( nChar==0 ){ + free(zWideFilename); + zWideFilename = 0; + } + return zWideFilename; +} + +/* +** Convert microsoft unicode to UTF-8. 
Space to hold the returned string is +** obtained from malloc(). +*/ +static char *unicodeToUtf8(const WCHAR *zWideFilename){ + int nByte; + char *zFilename; + + nByte = WideCharToMultiByte(CP_UTF8, 0, zWideFilename, -1, 0, 0, 0, 0); + zFilename = malloc( nByte ); + if( zFilename==0 ){ + return 0; + } + nByte = WideCharToMultiByte(CP_UTF8, 0, zWideFilename, -1, zFilename, nByte, + 0, 0); + if( nByte == 0 ){ + free(zFilename); + zFilename = 0; + } + return zFilename; +} + +/* +** Convert an ansi string to microsoft unicode, based on the +** current codepage settings for file apis. +** +** Space to hold the returned string is obtained +** from malloc. +*/ +static WCHAR *mbcsToUnicode(const char *zFilename){ + int nByte; + WCHAR *zMbcsFilename; + int codepage = AreFileApisANSI() ? CP_ACP : CP_OEMCP; + + nByte = MultiByteToWideChar(codepage, 0, zFilename, -1, NULL,0)*sizeof(WCHAR); + zMbcsFilename = malloc( nByte*sizeof(zMbcsFilename[0]) ); + if( zMbcsFilename==0 ){ + return 0; + } + nByte = MultiByteToWideChar(codepage, 0, zFilename, -1, zMbcsFilename, nByte); + if( nByte==0 ){ + free(zMbcsFilename); + zMbcsFilename = 0; + } + return zMbcsFilename; +} + +/* +** Convert microsoft unicode to multibyte character string, based on the +** user's Ansi codepage. +** +** Space to hold the returned string is obtained from +** malloc(). +*/ +static char *unicodeToMbcs(const WCHAR *zWideFilename){ + int nByte; + char *zFilename; + int codepage = AreFileApisANSI() ? CP_ACP : CP_OEMCP; + + nByte = WideCharToMultiByte(codepage, 0, zWideFilename, -1, 0, 0, 0, 0); + zFilename = malloc( nByte ); + if( zFilename==0 ){ + return 0; + } + nByte = WideCharToMultiByte(codepage, 0, zWideFilename, -1, zFilename, nByte, + 0, 0); + if( nByte == 0 ){ + free(zFilename); + zFilename = 0; + } + return zFilename; +} + +/* +** Convert multibyte character string to UTF-8. Space to hold the +** returned string is obtained from malloc(). +*/ +static char *mbcsToUtf8(const char *zFilename){ + char *zFilenameUtf8; + WCHAR *zTmpWide; + + zTmpWide = mbcsToUnicode(zFilename); + if( zTmpWide==0 ){ + return 0; + } + zFilenameUtf8 = unicodeToUtf8(zTmpWide); + free(zTmpWide); + return zFilenameUtf8; +} + +/* +** Convert UTF-8 to multibyte character string. Space to hold the +** returned string is obtained from malloc(). +*/ +static char *utf8ToMbcs(const char *zFilename){ + char *zFilenameMbcs; + WCHAR *zTmpWide; + + zTmpWide = utf8ToUnicode(zFilename); + if( zTmpWide==0 ){ + return 0; + } + zFilenameMbcs = unicodeToMbcs(zTmpWide); + free(zTmpWide); + return zFilenameMbcs; +} + +#if OS_WINCE +/************************************************************************* +** This section contains code for WinCE only. +*/ +/* +** WindowsCE does not have a localtime() function. So create a +** substitute. 
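+**
+** A win32 FILETIME counts 100-nanosecond intervals since 1601-01-01,
+** whereas time_t counts seconds since 1970-01-01.  The two epochs are
+** 11644473600 seconds apart, so a time_t value t is converted with
+**
+**     filetime = (t + 11644473600) * 10000000
+**
+** and then split into its low and high 32-bit halves.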
+*/ +#include +struct tm *__cdecl localtime(const time_t *t) +{ + static struct tm y; + FILETIME uTm, lTm; + SYSTEMTIME pTm; + sqlite3_int64 t64; + t64 = *t; + t64 = (t64 + 11644473600)*10000000; + uTm.dwLowDateTime = t64 & 0xFFFFFFFF; + uTm.dwHighDateTime= t64 >> 32; + FileTimeToLocalFileTime(&uTm,&lTm); + FileTimeToSystemTime(&lTm,&pTm); + y.tm_year = pTm.wYear - 1900; + y.tm_mon = pTm.wMonth - 1; + y.tm_wday = pTm.wDayOfWeek; + y.tm_mday = pTm.wDay; + y.tm_hour = pTm.wHour; + y.tm_min = pTm.wMinute; + y.tm_sec = pTm.wSecond; + return &y; +} + +/* This will never be called, but defined to make the code compile */ +#define GetTempPathA(a,b) + +#define LockFile(a,b,c,d,e) winceLockFile(&a, b, c, d, e) +#define UnlockFile(a,b,c,d,e) winceUnlockFile(&a, b, c, d, e) +#define LockFileEx(a,b,c,d,e,f) winceLockFileEx(&a, b, c, d, e, f) + +#define HANDLE_TO_WINFILE(a) (winFile*)&((char*)a)[-offsetof(winFile,h)] + +/* +** Acquire a lock on the handle h +*/ +static void winceMutexAcquire(HANDLE h){ + DWORD dwErr; + do { + dwErr = WaitForSingleObject(h, INFINITE); + } while (dwErr != WAIT_OBJECT_0 && dwErr != WAIT_ABANDONED); +} +/* +** Release a lock acquired by winceMutexAcquire() +*/ +#define winceMutexRelease(h) ReleaseMutex(h) + +/* +** Create the mutex and shared memory used for locking in the file +** descriptor pFile +*/ +static BOOL winceCreateLock(const char *zFilename, winFile *pFile){ + WCHAR *zTok; + WCHAR *zName = utf8ToUnicode(zFilename); + BOOL bInit = TRUE; + + /* Initialize the local lockdata */ + ZeroMemory(&pFile->local, sizeof(pFile->local)); + + /* Replace the backslashes from the filename and lowercase it + ** to derive a mutex name. */ + zTok = CharLowerW(zName); + for (;*zTok;zTok++){ + if (*zTok == '\\') *zTok = '_'; + } + + /* Create/open the named mutex */ + pFile->hMutex = CreateMutexW(NULL, FALSE, zName); + if (!pFile->hMutex){ + free(zName); + return FALSE; + } + + /* Acquire the mutex before continuing */ + winceMutexAcquire(pFile->hMutex); + + /* Since the names of named mutexes, semaphores, file mappings etc are + ** case-sensitive, take advantage of that by uppercasing the mutex name + ** and using that as the shared filemapping name. + */ + CharUpperW(zName); + pFile->hShared = CreateFileMappingW(INVALID_HANDLE_VALUE, NULL, + PAGE_READWRITE, 0, sizeof(winceLock), + zName); + + /* Set a flag that indicates we're the first to create the memory so it + ** must be zero-initialized */ + if (GetLastError() == ERROR_ALREADY_EXISTS){ + bInit = FALSE; + } + + free(zName); + + /* If we succeeded in making the shared memory handle, map it. 
*/ + if (pFile->hShared){ + pFile->shared = (winceLock*)MapViewOfFile(pFile->hShared, + FILE_MAP_READ|FILE_MAP_WRITE, 0, 0, sizeof(winceLock)); + /* If mapping failed, close the shared memory handle and erase it */ + if (!pFile->shared){ + CloseHandle(pFile->hShared); + pFile->hShared = NULL; + } + } + + /* If shared memory could not be created, then close the mutex and fail */ + if (pFile->hShared == NULL){ + winceMutexRelease(pFile->hMutex); + CloseHandle(pFile->hMutex); + pFile->hMutex = NULL; + return FALSE; + } + + /* Initialize the shared memory if we're supposed to */ + if (bInit) { + ZeroMemory(pFile->shared, sizeof(winceLock)); + } + + winceMutexRelease(pFile->hMutex); + return TRUE; +} + +/* +** Destroy the part of winFile that deals with wince locks +*/ +static void winceDestroyLock(winFile *pFile){ + if (pFile->hMutex){ + /* Acquire the mutex */ + winceMutexAcquire(pFile->hMutex); + + /* The following blocks should probably assert in debug mode, but they + are to cleanup in case any locks remained open */ + if (pFile->local.nReaders){ + pFile->shared->nReaders --; + } + if (pFile->local.bReserved){ + pFile->shared->bReserved = FALSE; + } + if (pFile->local.bPending){ + pFile->shared->bPending = FALSE; + } + if (pFile->local.bExclusive){ + pFile->shared->bExclusive = FALSE; + } + + /* De-reference and close our copy of the shared memory handle */ + UnmapViewOfFile(pFile->shared); + CloseHandle(pFile->hShared); + + if( pFile->zDeleteOnClose ){ + DeleteFileW(pFile->zDeleteOnClose); + free(pFile->zDeleteOnClose); + pFile->zDeleteOnClose = 0; + } + + /* Done with the mutex */ + winceMutexRelease(pFile->hMutex); + CloseHandle(pFile->hMutex); + pFile->hMutex = NULL; + } +} + +/* +** An implementation of the LockFile() API of windows for wince +*/ +static BOOL winceLockFile( + HANDLE *phFile, + DWORD dwFileOffsetLow, + DWORD dwFileOffsetHigh, + DWORD nNumberOfBytesToLockLow, + DWORD nNumberOfBytesToLockHigh +){ + winFile *pFile = HANDLE_TO_WINFILE(phFile); + BOOL bReturn = FALSE; + + if (!pFile->hMutex) return TRUE; + winceMutexAcquire(pFile->hMutex); + + /* Wanting an exclusive lock? */ + if (dwFileOffsetLow == SHARED_FIRST + && nNumberOfBytesToLockLow == SHARED_SIZE){ + if (pFile->shared->nReaders == 0 && pFile->shared->bExclusive == 0){ + pFile->shared->bExclusive = TRUE; + pFile->local.bExclusive = TRUE; + bReturn = TRUE; + } + } + + /* Want a read-only lock? */ + else if ((dwFileOffsetLow >= SHARED_FIRST && + dwFileOffsetLow < SHARED_FIRST + SHARED_SIZE) && + nNumberOfBytesToLockLow == 1){ + if (pFile->shared->bExclusive == 0){ + pFile->local.nReaders ++; + if (pFile->local.nReaders == 1){ + pFile->shared->nReaders ++; + } + bReturn = TRUE; + } + } + + /* Want a pending lock? */ + else if (dwFileOffsetLow == PENDING_BYTE && nNumberOfBytesToLockLow == 1){ + /* If no pending lock has been acquired, then acquire it */ + if (pFile->shared->bPending == 0) { + pFile->shared->bPending = TRUE; + pFile->local.bPending = TRUE; + bReturn = TRUE; + } + } + /* Want a reserved lock? 
*/ + else if (dwFileOffsetLow == RESERVED_BYTE && nNumberOfBytesToLockLow == 1){ + if (pFile->shared->bReserved == 0) { + pFile->shared->bReserved = TRUE; + pFile->local.bReserved = TRUE; + bReturn = TRUE; + } + } + + winceMutexRelease(pFile->hMutex); + return bReturn; +} + +/* +** An implementation of the UnlockFile API of windows for wince +*/ +static BOOL winceUnlockFile( + HANDLE *phFile, + DWORD dwFileOffsetLow, + DWORD dwFileOffsetHigh, + DWORD nNumberOfBytesToUnlockLow, + DWORD nNumberOfBytesToUnlockHigh +){ + winFile *pFile = HANDLE_TO_WINFILE(phFile); + BOOL bReturn = FALSE; + + if (!pFile->hMutex) return TRUE; + winceMutexAcquire(pFile->hMutex); + + /* Releasing a reader lock or an exclusive lock */ + if (dwFileOffsetLow >= SHARED_FIRST && + dwFileOffsetLow < SHARED_FIRST + SHARED_SIZE){ + /* Did we have an exclusive lock? */ + if (pFile->local.bExclusive){ + pFile->local.bExclusive = FALSE; + pFile->shared->bExclusive = FALSE; + bReturn = TRUE; + } + + /* Did we just have a reader lock? */ + else if (pFile->local.nReaders){ + pFile->local.nReaders --; + if (pFile->local.nReaders == 0) + { + pFile->shared->nReaders --; + } + bReturn = TRUE; + } + } + + /* Releasing a pending lock */ + else if (dwFileOffsetLow == PENDING_BYTE && nNumberOfBytesToUnlockLow == 1){ + if (pFile->local.bPending){ + pFile->local.bPending = FALSE; + pFile->shared->bPending = FALSE; + bReturn = TRUE; + } + } + /* Releasing a reserved lock */ + else if (dwFileOffsetLow == RESERVED_BYTE && nNumberOfBytesToUnlockLow == 1){ + if (pFile->local.bReserved) { + pFile->local.bReserved = FALSE; + pFile->shared->bReserved = FALSE; + bReturn = TRUE; + } + } + + winceMutexRelease(pFile->hMutex); + return bReturn; +} + +/* +** An implementation of the LockFileEx() API of windows for wince +*/ +static BOOL winceLockFileEx( + HANDLE *phFile, + DWORD dwFlags, + DWORD dwReserved, + DWORD nNumberOfBytesToLockLow, + DWORD nNumberOfBytesToLockHigh, + LPOVERLAPPED lpOverlapped +){ + /* If the caller wants a shared read lock, forward this call + ** to winceLockFile */ + if (lpOverlapped->Offset == SHARED_FIRST && + dwFlags == 1 && + nNumberOfBytesToLockLow == SHARED_SIZE){ + return winceLockFile(phFile, SHARED_FIRST, 0, 1, 0); + } + return FALSE; +} +/* +** End of the special code for wince +*****************************************************************************/ +#endif /* OS_WINCE */ + +/***************************************************************************** +** The next group of routines implement the I/O methods specified +** by the sqlite3_io_methods object. +******************************************************************************/ + +/* +** Close a file. +** +** It is reported that an attempt to close a handle might sometimes +** fail. This is a very unreasonable result, but windows is notorious +** for being unreasonable so I do not doubt that it might happen. If +** the close fails, we pause for 100 milliseconds and try again. As +** many as MX_CLOSE_ATTEMPT attempts to close the handle are made before +** giving up and returning an error. +*/ +#define MX_CLOSE_ATTEMPT 3 +static int winClose(sqlite3_file *id){ + int rc, cnt = 0; + winFile *pFile = (winFile*)id; + OSTRACE2("CLOSE %d\n", pFile->h); + do{ + rc = CloseHandle(pFile->h); + }while( rc==0 && cnt++ < MX_CLOSE_ATTEMPT && (Sleep(100), 1) ); +#if OS_WINCE + winceDestroyLock(pFile); +#endif + OpenCounter(-1); + return rc ? SQLITE_OK : SQLITE_IOERR; +} + +/* +** Some microsoft compilers lack this definition. 
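+**
+** SetFilePointer() reports failure by returning this value, but the
+** same bit pattern can also be a legitimate low-order file offset.
+** The callers below therefore treat it as an error only when
+** GetLastError() returns something other than NO_ERROR.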
+*/ +#ifndef INVALID_SET_FILE_POINTER +# define INVALID_SET_FILE_POINTER ((DWORD)-1) +#endif + +/* +** Read data from a file into a buffer. Return SQLITE_OK if all +** bytes were read successfully and SQLITE_IOERR if anything goes +** wrong. +*/ +static int winRead( + sqlite3_file *id, /* File to read from */ + void *pBuf, /* Write content into this buffer */ + int amt, /* Number of bytes to read */ + sqlite3_int64 offset /* Begin reading at this offset */ +){ + LONG upperBits = (offset>>32) & 0x7fffffff; + LONG lowerBits = offset & 0xffffffff; + DWORD rc; + DWORD got; + winFile *pFile = (winFile*)id; + assert( id!=0 ); + SimulateIOError(return SQLITE_IOERR_READ); + OSTRACE3("READ %d lock=%d\n", pFile->h, pFile->locktype); + rc = SetFilePointer(pFile->h, lowerBits, &upperBits, FILE_BEGIN); + if( rc==INVALID_SET_FILE_POINTER && GetLastError()!=NO_ERROR ){ + return SQLITE_FULL; + } + if( !ReadFile(pFile->h, pBuf, amt, &got, 0) ){ + return SQLITE_IOERR_READ; + } + if( got==(DWORD)amt ){ + return SQLITE_OK; + }else{ + memset(&((char*)pBuf)[got], 0, amt-got); + return SQLITE_IOERR_SHORT_READ; + } +} + +/* +** Write data from a buffer into a file. Return SQLITE_OK on success +** or some other error code on failure. +*/ +static int winWrite( + sqlite3_file *id, /* File to write into */ + const void *pBuf, /* The bytes to be written */ + int amt, /* Number of bytes to write */ + sqlite3_int64 offset /* Offset into the file to begin writing at */ +){ + LONG upperBits = (offset>>32) & 0x7fffffff; + LONG lowerBits = offset & 0xffffffff; + DWORD rc; + DWORD wrote; + winFile *pFile = (winFile*)id; + assert( id!=0 ); + SimulateIOError(return SQLITE_IOERR_WRITE); + SimulateDiskfullError(return SQLITE_FULL); + OSTRACE3("WRITE %d lock=%d\n", pFile->h, pFile->locktype); + rc = SetFilePointer(pFile->h, lowerBits, &upperBits, FILE_BEGIN); + if( rc==INVALID_SET_FILE_POINTER && GetLastError()!=NO_ERROR ){ + return SQLITE_FULL; + } + assert( amt>0 ); + while( + amt>0 + && (rc = WriteFile(pFile->h, pBuf, amt, &wrote, 0))!=0 + && wrote>0 + ){ + amt -= wrote; + pBuf = &((char*)pBuf)[wrote]; + } + if( !rc || amt>(int)wrote ){ + return SQLITE_FULL; + } + return SQLITE_OK; +} + +/* +** Truncate an open file to a specified size +*/ +static int winTruncate(sqlite3_file *id, sqlite3_int64 nByte){ + LONG upperBits = (nByte>>32) & 0x7fffffff; + LONG lowerBits = nByte & 0xffffffff; + winFile *pFile = (winFile*)id; + OSTRACE3("TRUNCATE %d %lld\n", pFile->h, nByte); + SimulateIOError(return SQLITE_IOERR_TRUNCATE); + SetFilePointer(pFile->h, lowerBits, &upperBits, FILE_BEGIN); + SetEndOfFile(pFile->h); + return SQLITE_OK; +} + +#ifdef SQLITE_TEST +/* +** Count the number of fullsyncs and normal syncs. This is used to test +** that syncs and fullsyncs are occuring at the right times. +*/ +int sqlite3_sync_count = 0; +int sqlite3_fullsync_count = 0; +#endif + +/* +** Make sure all writes to a particular file are committed to disk. 
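+**
+** The implementation below simply calls FlushFileBuffers() on the
+** file handle.  The SQLITE_SYNC_FULL flag makes no difference here
+** except to the sync counters kept for testing.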
+*/ +static int winSync(sqlite3_file *id, int flags){ + winFile *pFile = (winFile*)id; + OSTRACE3("SYNC %d lock=%d\n", pFile->h, pFile->locktype); +#ifdef SQLITE_TEST + if( flags & SQLITE_SYNC_FULL ){ + sqlite3_fullsync_count++; + } + sqlite3_sync_count++; +#endif + if( FlushFileBuffers(pFile->h) ){ + return SQLITE_OK; + }else{ + return SQLITE_IOERR; + } +} + +/* +** Determine the current size of a file in bytes +*/ +static int winFileSize(sqlite3_file *id, sqlite3_int64 *pSize){ + winFile *pFile = (winFile*)id; + DWORD upperBits, lowerBits; + SimulateIOError(return SQLITE_IOERR_FSTAT); + lowerBits = GetFileSize(pFile->h, &upperBits); + *pSize = (((sqlite3_int64)upperBits)<<32) + lowerBits; + return SQLITE_OK; +} + +/* +** LOCKFILE_FAIL_IMMEDIATELY is undefined on some Windows systems. +*/ +#ifndef LOCKFILE_FAIL_IMMEDIATELY +# define LOCKFILE_FAIL_IMMEDIATELY 1 +#endif + +/* +** Acquire a reader lock. +** Different API routines are called depending on whether or not this +** is Win95 or WinNT. +*/ +static int getReadLock(winFile *pFile){ + int res; + if( isNT() ){ + OVERLAPPED ovlp; + ovlp.Offset = SHARED_FIRST; + ovlp.OffsetHigh = 0; + ovlp.hEvent = 0; + res = LockFileEx(pFile->h, LOCKFILE_FAIL_IMMEDIATELY, + 0, SHARED_SIZE, 0, &ovlp); + }else{ + int lk; + sqlite3Randomness(sizeof(lk), &lk); + pFile->sharedLockByte = (lk & 0x7fffffff)%(SHARED_SIZE - 1); + res = LockFile(pFile->h, SHARED_FIRST+pFile->sharedLockByte, 0, 1, 0); + } + return res; +} + +/* +** Undo a readlock +*/ +static int unlockReadLock(winFile *pFile){ + int res; + if( isNT() ){ + res = UnlockFile(pFile->h, SHARED_FIRST, 0, SHARED_SIZE, 0); + }else{ + res = UnlockFile(pFile->h, SHARED_FIRST + pFile->sharedLockByte, 0, 1, 0); + } + return res; +} + +/* +** Lock the file with the lock specified by parameter locktype - one +** of the following: +** +** (1) SHARED_LOCK +** (2) RESERVED_LOCK +** (3) PENDING_LOCK +** (4) EXCLUSIVE_LOCK +** +** Sometimes when requesting one lock state, additional lock states +** are inserted in between. The locking might fail on one of the later +** transitions leaving the lock state different from what it started but +** still short of its goal. The following chart shows the allowed +** transitions and the inserted intermediate states: +** +** UNLOCKED -> SHARED +** SHARED -> RESERVED +** SHARED -> (PENDING) -> EXCLUSIVE +** RESERVED -> (PENDING) -> EXCLUSIVE +** PENDING -> EXCLUSIVE +** +** This routine will only increase a lock. The winUnlock() routine +** erases all locks at once and returns us immediately to locking level 0. +** It is not possible to lower the locking level one step at a time. You +** must go straight to locking level 0. +*/ +static int winLock(sqlite3_file *id, int locktype){ + int rc = SQLITE_OK; /* Return code from subroutines */ + int res = 1; /* Result of a windows lock call */ + int newLocktype; /* Set pFile->locktype to this value before exiting */ + int gotPendingLock = 0;/* True if we acquired a PENDING lock this time */ + winFile *pFile = (winFile*)id; + + assert( pFile!=0 ); + OSTRACE5("LOCK %d %d was %d(%d)\n", + pFile->h, locktype, pFile->locktype, pFile->sharedLockByte); + + /* If there is already a lock of this type or more restrictive on the + ** OsFile, do nothing. Don't use the end_lock: exit path, as + ** sqlite3OsEnterMutex() hasn't been called yet. 
+ */ + if( pFile->locktype>=locktype ){ + return SQLITE_OK; + } + + /* Make sure the locking sequence is correct + */ + assert( pFile->locktype!=NO_LOCK || locktype==SHARED_LOCK ); + assert( locktype!=PENDING_LOCK ); + assert( locktype!=RESERVED_LOCK || pFile->locktype==SHARED_LOCK ); + + /* Lock the PENDING_LOCK byte if we need to acquire a PENDING lock or + ** a SHARED lock. If we are acquiring a SHARED lock, the acquisition of + ** the PENDING_LOCK byte is temporary. + */ + newLocktype = pFile->locktype; + if( pFile->locktype==NO_LOCK + || (locktype==EXCLUSIVE_LOCK && pFile->locktype==RESERVED_LOCK) + ){ + int cnt = 3; + while( cnt-->0 && (res = LockFile(pFile->h, PENDING_BYTE, 0, 1, 0))==0 ){ + /* Try 3 times to get the pending lock. The pending lock might be + ** held by another reader process who will release it momentarily. + */ + OSTRACE2("could not get a PENDING lock. cnt=%d\n", cnt); + Sleep(1); + } + gotPendingLock = res; + } + + /* Acquire a shared lock + */ + if( locktype==SHARED_LOCK && res ){ + assert( pFile->locktype==NO_LOCK ); + res = getReadLock(pFile); + if( res ){ + newLocktype = SHARED_LOCK; + } + } + + /* Acquire a RESERVED lock + */ + if( locktype==RESERVED_LOCK && res ){ + assert( pFile->locktype==SHARED_LOCK ); + res = LockFile(pFile->h, RESERVED_BYTE, 0, 1, 0); + if( res ){ + newLocktype = RESERVED_LOCK; + } + } + + /* Acquire a PENDING lock + */ + if( locktype==EXCLUSIVE_LOCK && res ){ + newLocktype = PENDING_LOCK; + gotPendingLock = 0; + } + + /* Acquire an EXCLUSIVE lock + */ + if( locktype==EXCLUSIVE_LOCK && res ){ + assert( pFile->locktype>=SHARED_LOCK ); + res = unlockReadLock(pFile); + OSTRACE2("unreadlock = %d\n", res); + res = LockFile(pFile->h, SHARED_FIRST, 0, SHARED_SIZE, 0); + if( res ){ + newLocktype = EXCLUSIVE_LOCK; + }else{ + OSTRACE2("error-code = %d\n", GetLastError()); + getReadLock(pFile); + } + } + + /* If we are holding a PENDING lock that ought to be released, then + ** release it now. + */ + if( gotPendingLock && locktype==SHARED_LOCK ){ + UnlockFile(pFile->h, PENDING_BYTE, 0, 1, 0); + } + + /* Update the state of the lock has held in the file descriptor then + ** return the appropriate result code. + */ + if( res ){ + rc = SQLITE_OK; + }else{ + OSTRACE4("LOCK FAILED %d trying for %d but got %d\n", pFile->h, + locktype, newLocktype); + rc = SQLITE_BUSY; + } + pFile->locktype = newLocktype; + return rc; +} + +/* +** This routine checks if there is a RESERVED lock held on the specified +** file by this or any other process. If such a lock is held, return +** non-zero, otherwise zero. +*/ +static int winCheckReservedLock(sqlite3_file *id){ + int rc; + winFile *pFile = (winFile*)id; + assert( pFile!=0 ); + if( pFile->locktype>=RESERVED_LOCK ){ + rc = 1; + OSTRACE3("TEST WR-LOCK %d %d (local)\n", pFile->h, rc); + }else{ + rc = LockFile(pFile->h, RESERVED_BYTE, 0, 1, 0); + if( rc ){ + UnlockFile(pFile->h, RESERVED_BYTE, 0, 1, 0); + } + rc = !rc; + OSTRACE3("TEST WR-LOCK %d %d (remote)\n", pFile->h, rc); + } + return rc; +} + +/* +** Lower the locking level on file descriptor id to locktype. locktype +** must be either NO_LOCK or SHARED_LOCK. +** +** If the locking level of the file descriptor is already at or below +** the requested locking level, this routine is a no-op. +** +** It is not possible for this routine to fail if the second argument +** is NO_LOCK. 
If the second argument is SHARED_LOCK then this routine +** might return SQLITE_IOERR; +*/ +static int winUnlock(sqlite3_file *id, int locktype){ + int type; + winFile *pFile = (winFile*)id; + int rc = SQLITE_OK; + assert( pFile!=0 ); + assert( locktype<=SHARED_LOCK ); + OSTRACE5("UNLOCK %d to %d was %d(%d)\n", pFile->h, locktype, + pFile->locktype, pFile->sharedLockByte); + type = pFile->locktype; + if( type>=EXCLUSIVE_LOCK ){ + UnlockFile(pFile->h, SHARED_FIRST, 0, SHARED_SIZE, 0); + if( locktype==SHARED_LOCK && !getReadLock(pFile) ){ + /* This should never happen. We should always be able to + ** reacquire the read lock */ + rc = SQLITE_IOERR_UNLOCK; + } + } + if( type>=RESERVED_LOCK ){ + UnlockFile(pFile->h, RESERVED_BYTE, 0, 1, 0); + } + if( locktype==NO_LOCK && type>=SHARED_LOCK ){ + unlockReadLock(pFile); + } + if( type>=PENDING_LOCK ){ + UnlockFile(pFile->h, PENDING_BYTE, 0, 1, 0); + } + pFile->locktype = locktype; + return rc; +} + +/* +** Control and query of the open file handle. +*/ +static int winFileControl(sqlite3_file *id, int op, void *pArg){ + switch( op ){ + case SQLITE_FCNTL_LOCKSTATE: { + *(int*)pArg = ((winFile*)id)->locktype; + return SQLITE_OK; + } + } + return SQLITE_ERROR; +} + +/* +** Return the sector size in bytes of the underlying block device for +** the specified file. This is almost always 512 bytes, but may be +** larger for some devices. +** +** SQLite code assumes this function cannot fail. It also assumes that +** if two files are created in the same file-system directory (i.e. +** a database and it's journal file) that the sector size will be the +** same for both. +*/ +static int winSectorSize(sqlite3_file *id){ + return SQLITE_DEFAULT_SECTOR_SIZE; +} + +/* +** Return a vector of device characteristics. +*/ +static int winDeviceCharacteristics(sqlite3_file *id){ + return 0; +} + +/* +** This vector defines all the methods that can operate on an +** sqlite3_file for win32. +*/ +static const sqlite3_io_methods winIoMethod = { + 1, /* iVersion */ + winClose, + winRead, + winWrite, + winTruncate, + winSync, + winFileSize, + winLock, + winUnlock, + winCheckReservedLock, + winFileControl, + winSectorSize, + winDeviceCharacteristics +}; + +/*************************************************************************** +** Here ends the I/O methods that form the sqlite3_io_methods object. +** +** The next block of code implements the VFS methods. +****************************************************************************/ + +/* +** Convert a UTF-8 filename into whatever form the underlying +** operating system wants filenames in. Space to hold the result +** is obtained from malloc and must be freed by the calling +** function. +*/ +static void *convertUtf8Filename(const char *zFilename){ + void *zConverted = 0; + if( isNT() ){ + zConverted = utf8ToUnicode(zFilename); + }else{ + zConverted = utf8ToMbcs(zFilename); + } + /* caller will handle out of memory */ + return zConverted; +} + +/* +** Open a file. 
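+**
+** The most important SQLite open flags map onto CreateFile()
+** parameters as follows: SQLITE_OPEN_READWRITE selects
+** GENERIC_READ|GENERIC_WRITE (otherwise GENERIC_READ only);
+** SQLITE_OPEN_CREATE selects OPEN_ALWAYS (otherwise OPEN_EXISTING);
+** the main database is opened with FILE_SHARE_READ|FILE_SHARE_WRITE;
+** and temporary databases, temporary journals and subjournals are
+** opened with FILE_FLAG_DELETE_ON_CLOSE.  If a read/write open fails,
+** the open is retried read-only before giving up with SQLITE_CANTOPEN.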
+*/ +static int winOpen( + sqlite3_vfs *pVfs, /* Not used */ + const char *zName, /* Name of the file (UTF-8) */ + sqlite3_file *id, /* Write the SQLite file handle here */ + int flags, /* Open mode flags */ + int *pOutFlags /* Status return flags */ +){ + HANDLE h; + DWORD dwDesiredAccess; + DWORD dwShareMode; + DWORD dwCreationDisposition; + DWORD dwFlagsAndAttributes = 0; + winFile *pFile = (winFile*)id; + void *zConverted = convertUtf8Filename(zName); + if( zConverted==0 ){ + return SQLITE_NOMEM; + } + + if( flags & SQLITE_OPEN_READWRITE ){ + dwDesiredAccess = GENERIC_READ | GENERIC_WRITE; + }else{ + dwDesiredAccess = GENERIC_READ; + } + if( flags & SQLITE_OPEN_CREATE ){ + dwCreationDisposition = OPEN_ALWAYS; + }else{ + dwCreationDisposition = OPEN_EXISTING; + } + if( flags & SQLITE_OPEN_MAIN_DB ){ + dwShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE; + }else{ + dwShareMode = 0; + } + if( flags & (SQLITE_OPEN_TEMP_DB | SQLITE_OPEN_TEMP_JOURNAL + | SQLITE_OPEN_SUBJOURNAL) ){ + dwFlagsAndAttributes = FILE_ATTRIBUTE_TEMPORARY + | FILE_ATTRIBUTE_HIDDEN + | FILE_FLAG_DELETE_ON_CLOSE; + }else{ + dwFlagsAndAttributes = FILE_ATTRIBUTE_NORMAL; + } + if( flags & (SQLITE_OPEN_MAIN_DB | SQLITE_OPEN_TEMP_DB) ){ + dwFlagsAndAttributes |= FILE_FLAG_RANDOM_ACCESS; + }else{ + dwFlagsAndAttributes |= FILE_FLAG_SEQUENTIAL_SCAN; + } + if( isNT() ){ + h = CreateFileW((WCHAR*)zConverted, + dwDesiredAccess, + dwShareMode, + NULL, + dwCreationDisposition, + dwFlagsAndAttributes, + NULL + ); + }else{ +#if OS_WINCE + return SQLITE_NOMEM; +#else + h = CreateFileA((char*)zConverted, + dwDesiredAccess, + dwShareMode, + NULL, + dwCreationDisposition, + dwFlagsAndAttributes, + NULL + ); +#endif + } + if( h==INVALID_HANDLE_VALUE ){ + free(zConverted); + if( flags & SQLITE_OPEN_READWRITE ){ + return winOpen(0, zName, id, + ((flags|SQLITE_OPEN_READONLY)&~SQLITE_OPEN_READWRITE), pOutFlags); + }else{ + return SQLITE_CANTOPEN; + } + } + if( pOutFlags ){ + if( flags & SQLITE_OPEN_READWRITE ){ + *pOutFlags = SQLITE_OPEN_READWRITE; + }else{ + *pOutFlags = SQLITE_OPEN_READONLY; + } + } + memset(pFile, 0, sizeof(*pFile)); + pFile->pMethod = &winIoMethod; + pFile->h = h; +#if OS_WINCE + if( (flags & (SQLITE_OPEN_READWRITE|SQLITE_OPEN_MAIN_DB)) == + (SQLITE_OPEN_READWRITE|SQLITE_OPEN_MAIN_DB) + && !winceCreateLock(zFilename, pFile) + ){ + CloseHandle(h); + free(zConverted); + return SQLITE_CANTOPEN; + } + if( dwFlagsAndAttributes & FILE_FLAG_DELETE_ON_CLOSE ){ + pFile->zDeleteOnClose = zConverted; + }else +#endif + { + free(zConverted); + } + OpenCounter(+1); + return SQLITE_OK; +} + +/* +** Delete the named file. +** +** Note that windows does not allow a file to be deleted if some other +** process has it open. Sometimes a virus scanner or indexing program +** will open a journal file shortly after it is created in order to do +** whatever it is it does. While this other process is holding the +** file open, we will be unable to delete it. To work around this +** problem, we delay 100 milliseconds and try to delete again. Up +** to MX_DELETION_ATTEMPTs deletion attempts are run before giving +** up and returning an error. 
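+**
+** Between attempts the file attributes are re-checked; retrying stops
+** once the delete succeeds, the file no longer exists, or
+** MX_DELETION_ATTEMPTS is reached.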
+*/ +#define MX_DELETION_ATTEMPTS 3 +static int winDelete( + sqlite3_vfs *pVfs, /* Not used on win32 */ + const char *zFilename, /* Name of file to delete */ + int syncDir /* Not used on win32 */ +){ + int cnt = 0; + int rc; + void *zConverted = convertUtf8Filename(zFilename); + if( zConverted==0 ){ + return SQLITE_NOMEM; + } + SimulateIOError(return SQLITE_IOERR_DELETE); + if( isNT() ){ + do{ + rc = DeleteFileW(zConverted); + }while( rc==0 && GetFileAttributesW(zConverted)!=0xffffffff + && cnt++ < MX_DELETION_ATTEMPTS && (Sleep(100), 1) ); + }else{ +#if OS_WINCE + return SQLITE_NOMEM; +#else + do{ + rc = DeleteFileA(zConverted); + }while( rc==0 && GetFileAttributesA(zConverted)!=0xffffffff + && cnt++ < MX_DELETION_ATTEMPTS && (Sleep(100), 1) ); +#endif + } + free(zConverted); + OSTRACE2("DELETE \"%s\"\n", zFilename); + return rc!=0 ? SQLITE_OK : SQLITE_IOERR; +} + +/* +** Check the existance and status of a file. +*/ +static int winAccess( + sqlite3_vfs *pVfs, /* Not used on win32 */ + const char *zFilename, /* Name of file to check */ + int flags /* Type of test to make on this file */ +){ + DWORD attr; + int rc; + void *zConverted = convertUtf8Filename(zFilename); + if( zConverted==0 ){ + return SQLITE_NOMEM; + } + if( isNT() ){ + attr = GetFileAttributesW((WCHAR*)zConverted); + }else{ +#if OS_WINCE + return SQLITE_NOMEM; +#else + attr = GetFileAttributesA((char*)zConverted); +#endif + } + free(zConverted); + switch( flags ){ + case SQLITE_ACCESS_READ: + case SQLITE_ACCESS_EXISTS: + rc = attr!=0xffffffff; + break; + case SQLITE_ACCESS_READWRITE: + rc = (attr & FILE_ATTRIBUTE_READONLY)==0; + break; + default: + assert(!"Invalid flags argument"); + } + return rc; +} + + +/* +** Create a temporary file name in zBuf. zBuf must be big enough to +** hold at pVfs->mxPathname characters. +*/ +static int winGetTempname(sqlite3_vfs *pVfs, int nBuf, char *zBuf){ + static char zChars[] = + "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "0123456789"; + int i, j; + char zTempPath[MAX_PATH+1]; + if( sqlite3_temp_directory ){ + sqlite3_snprintf(MAX_PATH-30, zTempPath, "%s", sqlite3_temp_directory); + }else if( isNT() ){ + char *zMulti; + WCHAR zWidePath[MAX_PATH]; + GetTempPathW(MAX_PATH-30, zWidePath); + zMulti = unicodeToUtf8(zWidePath); + if( zMulti ){ + sqlite3_snprintf(MAX_PATH-30, zTempPath, "%s", zMulti); + free(zMulti); + }else{ + return SQLITE_NOMEM; + } + }else{ + char *zUtf8; + char zMbcsPath[MAX_PATH]; + GetTempPathA(MAX_PATH-30, zMbcsPath); + zUtf8 = mbcsToUtf8(zMbcsPath); + if( zUtf8 ){ + sqlite3_snprintf(MAX_PATH-30, zTempPath, "%s", zUtf8); + free(zUtf8); + }else{ + return SQLITE_NOMEM; + } + } + for(i=strlen(zTempPath); i>0 && zTempPath[i-1]=='\\'; i--){} + zTempPath[i] = 0; + sqlite3_snprintf(pVfs->mxPathname-30, zBuf, + "%s\\"SQLITE_TEMP_FILE_PREFIX, zTempPath); + j = strlen(zBuf); + sqlite3Randomness(20, &zBuf[j]); + for(i=0; i<20; i++, j++){ + zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ]; + } + zBuf[j] = 0; + OSTRACE2("TEMP FILENAME: %s\n", zBuf); + return SQLITE_OK; +} + +/* +** Turn a relative pathname into a full pathname. Write the full +** pathname into zOut[]. zOut[] will be at least pVfs->mxPathname +** bytes in size. 
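+**
+** On cygwin the conversion is delegated to
+** cygwin_conv_to_full_win32_path().  WinCE has no notion of a current
+** directory, so the relative name is copied through unchanged.  On
+** all other windows platforms GetFullPathNameW() or GetFullPathNameA()
+** is used, depending on whether the host is NT-class or Win95-class.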
+*/ +static int winFullPathname( + sqlite3_vfs *pVfs, /* Pointer to vfs object */ + const char *zRelative, /* Possibly relative input path */ + int nFull, /* Size of output buffer in bytes */ + char *zFull /* Output buffer */ +){ + +#if defined(__CYGWIN__) + cygwin_conv_to_full_win32_path(zRelative, zFull); + return SQLITE_OK; +#endif + +#if OS_WINCE + /* WinCE has no concept of a relative pathname, or so I am told. */ + sqlite3_snprintf(pVfs->mxPathname, zFull, "%s", zRelative); +#endif + +#if !OS_WINCE && !defined(__CYGWIN__) + int nByte; + void *zConverted; + char *zOut; + zConverted = convertUtf8Filename(zRelative); + if( isNT() ){ + WCHAR *zTemp; + nByte = GetFullPathNameW((WCHAR*)zConverted, 0, 0, 0) + 3; + zTemp = malloc( nByte*sizeof(zTemp[0]) ); + if( zTemp==0 ){ + free(zConverted); + return SQLITE_NOMEM; + } + GetFullPathNameW((WCHAR*)zConverted, nByte, zTemp, 0); + free(zConverted); + zOut = unicodeToUtf8(zTemp); + free(zTemp); + }else{ + char *zTemp; + nByte = GetFullPathNameA((char*)zConverted, 0, 0, 0) + 3; + zTemp = malloc( nByte*sizeof(zTemp[0]) ); + if( zTemp==0 ){ + free(zConverted); + return SQLITE_NOMEM; + } + GetFullPathNameA((char*)zConverted, nByte, zTemp, 0); + free(zConverted); + zOut = mbcsToUtf8(zTemp); + free(zTemp); + } + if( zOut ){ + sqlite3_snprintf(pVfs->mxPathname, zFull, "%s", zOut); + free(zOut); + return SQLITE_OK; + }else{ + return SQLITE_NOMEM; + } +#endif +} + +#ifndef SQLITE_OMIT_LOAD_EXTENSION +/* +** Interfaces for opening a shared library, finding entry points +** within the shared library, and closing the shared library. +*/ +/* +** Interfaces for opening a shared library, finding entry points +** within the shared library, and closing the shared library. +*/ +static void *winDlOpen(sqlite3_vfs *pVfs, const char *zFilename){ + HANDLE h; + void *zConverted = convertUtf8Filename(zFilename); + if( zConverted==0 ){ + return 0; + } + if( isNT() ){ + h = LoadLibraryW((WCHAR*)zConverted); + }else{ +#if OS_WINCE + return 0; +#else + h = LoadLibraryA((char*)zConverted); +#endif + } + free(zConverted); + return (void*)h; +} +static void winDlError(sqlite3_vfs *pVfs, int nBuf, char *zBufOut){ + FormatMessage( + FORMAT_MESSAGE_FROM_SYSTEM, + NULL, + GetLastError(), + 0, + zBufOut, + nBuf-1, + 0 + ); +} +void *winDlSym(sqlite3_vfs *pVfs, void *pHandle, const char *zSymbol){ +#if OS_WINCE + /* The GetProcAddressA() routine is only available on wince. */ + return GetProcAddressA((HANDLE)pHandle, zSymbol); +#else + /* All other windows platforms expect GetProcAddress() to take + ** an Ansi string regardless of the _UNICODE setting */ + return GetProcAddress((HANDLE)pHandle, zSymbol); +#endif +} +void winDlClose(sqlite3_vfs *pVfs, void *pHandle){ + FreeLibrary((HANDLE)pHandle); +} +#else /* if SQLITE_OMIT_LOAD_EXTENSION is defined: */ + #define winDlOpen 0 + #define winDlError 0 + #define winDlSym 0 + #define winDlClose 0 +#endif + + +/* +** Write up to nBuf bytes of randomness into zBuf. 
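+**
+** Only low-grade entropy sources are used: the system time, the
+** process id, the tick count and the performance counter, in that
+** order, as many as fit in nBuf.  The return value is the number of
+** bytes actually written.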
+*/ +static int winRandomness(sqlite3_vfs *pVfs, int nBuf, char *zBuf){ + int n = 0; + if( sizeof(SYSTEMTIME)<=nBuf-n ){ + SYSTEMTIME x; + GetSystemTime(&x); + memcpy(&zBuf[n], &x, sizeof(x)); + n += sizeof(x); + } + if( sizeof(DWORD)<=nBuf-n ){ + DWORD pid = GetCurrentProcessId(); + memcpy(&zBuf[n], &pid, sizeof(pid)); + n += sizeof(pid); + } + if( sizeof(DWORD)<=nBuf-n ){ + DWORD cnt = GetTickCount(); + memcpy(&zBuf[n], &cnt, sizeof(cnt)); + n += sizeof(cnt); + } + if( sizeof(LARGE_INTEGER)<=nBuf-n ){ + LARGE_INTEGER i; + QueryPerformanceCounter(&i); + memcpy(&zBuf[n], &i, sizeof(i)); + n += sizeof(i); + } + return n; +} + + +/* +** Sleep for a little while. Return the amount of time slept. +*/ +static int winSleep(sqlite3_vfs *pVfs, int microsec){ + Sleep((microsec+999)/1000); + return ((microsec+999)/1000)*1000; +} + +/* +** The following variable, if set to a non-zero value, becomes the result +** returned from sqlite3OsCurrentTime(). This is used for testing. +*/ +#ifdef SQLITE_TEST +int sqlite3_current_time = 0; +#endif + +/* +** Find the current time (in Universal Coordinated Time). Write the +** current time and date as a Julian Day number into *prNow and +** return 0. Return 1 if the time and date cannot be found. +*/ +int winCurrentTime(sqlite3_vfs *pVfs, double *prNow){ + FILETIME ft; + /* FILETIME structure is a 64-bit value representing the number of + 100-nanosecond intervals since January 1, 1601 (= JD 2305813.5). + */ + double now; +#if OS_WINCE + SYSTEMTIME time; + GetSystemTime(&time); + SystemTimeToFileTime(&time,&ft); +#else + GetSystemTimeAsFileTime( &ft ); +#endif + now = ((double)ft.dwHighDateTime) * 4294967296.0; + *prNow = (now + ft.dwLowDateTime)/864000000000.0 + 2305813.5; +#ifdef SQLITE_TEST + if( sqlite3_current_time ){ + *prNow = sqlite3_current_time/86400.0 + 2440587.5; + } +#endif + return 0; +} + + +/* +** Return a pointer to the sqlite3DefaultVfs structure. We use +** a function rather than give the structure global scope because +** some compilers (MSVC) do not allow forward declarations of +** initialized structures. +*/ +sqlite3_vfs *sqlite3OsDefaultVfs(void){ + static sqlite3_vfs winVfs = { + 1, /* iVersion */ + sizeof(winFile), /* szOsFile */ + MAX_PATH, /* mxPathname */ + 0, /* pNext */ + "win32", /* zName */ + 0, /* pAppData */ + + winOpen, /* xOpen */ + winDelete, /* xDelete */ + winAccess, /* xAccess */ + winGetTempname, /* xGetTempName */ + winFullPathname, /* xFullPathname */ + winDlOpen, /* xDlOpen */ + winDlError, /* xDlError */ + winDlSym, /* xDlSym */ + winDlClose, /* xDlClose */ + winRandomness, /* xRandomness */ + winSleep, /* xSleep */ + winCurrentTime /* xCurrentTime */ + }; + + return &winVfs; +} + +#endif /* OS_WIN */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/pager.c b/libraries/sqlite/unix/sqlite-3.5.1/src/pager.c new file mode 100644 index 0000000000..b0ad7158bc --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/pager.c @@ -0,0 +1,5104 @@ +/* +** 2001 September 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This is the implementation of the page cache subsystem or "pager". +** +** The pager is used to access a database disk file. 
It implements +** atomic commit and rollback through the use of a journal file that +** is separate from the database file. The pager also implements file +** locking to prevent two processes from writing the same database +** file simultaneously, or one process from reading the database while +** another is writing. +** +** @(#) $Id: pager.c,v 1.392 2007/10/03 15:22:26 danielk1977 Exp $ +*/ +#ifndef SQLITE_OMIT_DISKIO +#include "sqliteInt.h" +#include +#include + +/* +** Macros for troubleshooting. Normally turned off +*/ +#if 0 +#define sqlite3DebugPrintf printf +#define PAGERTRACE1(X) sqlite3DebugPrintf(X) +#define PAGERTRACE2(X,Y) sqlite3DebugPrintf(X,Y) +#define PAGERTRACE3(X,Y,Z) sqlite3DebugPrintf(X,Y,Z) +#define PAGERTRACE4(X,Y,Z,W) sqlite3DebugPrintf(X,Y,Z,W) +#define PAGERTRACE5(X,Y,Z,W,V) sqlite3DebugPrintf(X,Y,Z,W,V) +#else +#define PAGERTRACE1(X) +#define PAGERTRACE2(X,Y) +#define PAGERTRACE3(X,Y,Z) +#define PAGERTRACE4(X,Y,Z,W) +#define PAGERTRACE5(X,Y,Z,W,V) +#endif + +/* +** The following two macros are used within the PAGERTRACEX() macros above +** to print out file-descriptors. +** +** PAGERID() takes a pointer to a Pager struct as it's argument. The +** associated file-descriptor is returned. FILEHANDLEID() takes an sqlite3_file +** struct as it's argument. +*/ +#define PAGERID(p) ((int)(p->fd)) +#define FILEHANDLEID(fd) ((int)fd) + +/* +** The page cache as a whole is always in one of the following +** states: +** +** PAGER_UNLOCK The page cache is not currently reading or +** writing the database file. There is no +** data held in memory. This is the initial +** state. +** +** PAGER_SHARED The page cache is reading the database. +** Writing is not permitted. There can be +** multiple readers accessing the same database +** file at the same time. +** +** PAGER_RESERVED This process has reserved the database for writing +** but has not yet made any changes. Only one process +** at a time can reserve the database. The original +** database file has not been modified so other +** processes may still be reading the on-disk +** database file. +** +** PAGER_EXCLUSIVE The page cache is writing the database. +** Access is exclusive. No other processes or +** threads can be reading or writing while one +** process is writing. +** +** PAGER_SYNCED The pager moves to this state from PAGER_EXCLUSIVE +** after all dirty pages have been written to the +** database file and the file has been synced to +** disk. All that remains to do is to remove or +** truncate the journal file and the transaction +** will be committed. +** +** The page cache comes up in PAGER_UNLOCK. The first time a +** sqlite3PagerGet() occurs, the state transitions to PAGER_SHARED. +** After all pages have been released using sqlite_page_unref(), +** the state transitions back to PAGER_UNLOCK. The first time +** that sqlite3PagerWrite() is called, the state transitions to +** PAGER_RESERVED. (Note that sqlite3PagerWrite() can only be +** called on an outstanding page which means that the pager must +** be in PAGER_SHARED before it transitions to PAGER_RESERVED.) +** PAGER_RESERVED means that there is an open rollback journal. +** The transition to PAGER_EXCLUSIVE occurs before any changes +** are made to the database file, though writes to the rollback +** journal occurs with just PAGER_RESERVED. After an sqlite3PagerRollback() +** or sqlite3PagerCommitPhaseTwo(), the state can go back to PAGER_SHARED, +** or it can stay at PAGER_EXCLUSIVE if we are in exclusive access mode. 
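+**
+** During an ordinary write transaction the state advances along
+**
+**     UNLOCK -> SHARED -> RESERVED -> EXCLUSIVE -> SYNCED
+**
+** and, except in exclusive access mode, falls back to PAGER_SHARED
+** once the transaction ends via sqlite3PagerRollback() or
+** sqlite3PagerCommitPhaseTwo().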
+*/ +#define PAGER_UNLOCK 0 +#define PAGER_SHARED 1 /* same as SHARED_LOCK */ +#define PAGER_RESERVED 2 /* same as RESERVED_LOCK */ +#define PAGER_EXCLUSIVE 4 /* same as EXCLUSIVE_LOCK */ +#define PAGER_SYNCED 5 + +/* +** If the SQLITE_BUSY_RESERVED_LOCK macro is set to true at compile-time, +** then failed attempts to get a reserved lock will invoke the busy callback. +** This is off by default. To see why, consider the following scenario: +** +** Suppose thread A already has a shared lock and wants a reserved lock. +** Thread B already has a reserved lock and wants an exclusive lock. If +** both threads are using their busy callbacks, it might be a long time +** be for one of the threads give up and allows the other to proceed. +** But if the thread trying to get the reserved lock gives up quickly +** (if it never invokes its busy callback) then the contention will be +** resolved quickly. +*/ +#ifndef SQLITE_BUSY_RESERVED_LOCK +# define SQLITE_BUSY_RESERVED_LOCK 0 +#endif + +/* +** This macro rounds values up so that if the value is an address it +** is guaranteed to be an address that is aligned to an 8-byte boundary. +*/ +#define FORCE_ALIGNMENT(X) (((X)+7)&~7) + +typedef struct PgHdr PgHdr; + +/* +** Each pager stores all currently unreferenced pages in a list sorted +** in least-recently-used (LRU) order (i.e. the first item on the list has +** not been referenced in a long time, the last item has been recently +** used). An instance of this structure is included as part of each +** pager structure for this purpose (variable Pager.lru). +** +** Additionally, if memory-management is enabled, all unreferenced pages +** are stored in a global LRU list (global variable sqlite3LruPageList). +** +** In both cases, the PagerLruList.pFirstSynced variable points to +** the first page in the corresponding list that does not require an +** fsync() operation before it's memory can be reclaimed. If no such +** page exists, PagerLruList.pFirstSynced is set to NULL. +*/ +typedef struct PagerLruList PagerLruList; +struct PagerLruList { + PgHdr *pFirst; /* First page in LRU list */ + PgHdr *pLast; /* Last page in LRU list (the most recently used) */ + PgHdr *pFirstSynced; /* First page in list with PgHdr.needSync==0 */ +}; + +/* +** The following structure contains the next and previous pointers used +** to link a PgHdr structure into a PagerLruList linked list. +*/ +typedef struct PagerLruLink PagerLruLink; +struct PagerLruLink { + PgHdr *pNext; + PgHdr *pPrev; +}; + +/* +** Each in-memory image of a page begins with the following header. +** This header is only visible to this pager module. The client +** code that calls pager sees only the data that follows the header. +** +** Client code should call sqlite3PagerWrite() on a page prior to making +** any modifications to that page. The first time sqlite3PagerWrite() +** is called, the original page contents are written into the rollback +** journal and PgHdr.inJournal and PgHdr.needSync are set. Later, once +** the journal page has made it onto the disk surface, PgHdr.needSync +** is cleared. The modified page cannot be written back into the original +** database file until the journal pages has been synced to disk and the +** PgHdr.needSync has been cleared. +** +** The PgHdr.dirty flag is set when sqlite3PagerWrite() is called and +** is cleared again when the page content is written back to the original +** database file. 
+** +** Details of important structure elements: +** +** needSync +** +** If this is true, this means that it is not safe to write the page +** content to the database because the original content needed +** for rollback has not by synced to the main rollback journal. +** The original content may have been written to the rollback journal +** but it has not yet been synced. So we cannot write to the database +** file because power failure might cause the page in the journal file +** to never reach the disk. It is as if the write to the journal file +** does not occur until the journal file is synced. +** +** This flag is false if the page content exactly matches what +** currently exists in the database file. The needSync flag is also +** false if the original content has been written to the main rollback +** journal and synced. If the page represents a new page that has +** been added onto the end of the database during the current +** transaction, the needSync flag is true until the original database +** size in the journal header has been synced to disk. +** +** inJournal +** +** This is true if the original page has been written into the main +** rollback journal. This is always false for new pages added to +** the end of the database file during the current transaction. +** And this flag says nothing about whether or not the journal +** has been synced to disk. For pages that are in the original +** database file, the following expression should always be true: +** +** inJournal = (pPager->aInJournal[(pgno-1)/8] & (1<<((pgno-1)%8))!=0 +** +** The pPager->aInJournal[] array is only valid for the original +** pages of the database, not new pages that are added to the end +** of the database, so obviously the above expression cannot be +** valid for new pages. For new pages inJournal is always 0. +** +** dirty +** +** When true, this means that the content of the page has been +** modified and needs to be written back to the database file. +** If false, it means that either the content of the page is +** unchanged or else the content is unimportant and we do not +** care whether or not it is preserved. +** +** alwaysRollback +** +** This means that the sqlite3PagerDontRollback() API should be +** ignored for this page. The DontRollback() API attempts to say +** that the content of the page on disk is unimportant (it is an +** unused page on the freelist) so that it is unnecessary to +** rollback changes to this page because the content of the page +** can change without changing the meaning of the database. This +** flag overrides any DontRollback() attempt. This flag is set +** when a page that originally contained valid data is added to +** the freelist. Later in the same transaction, this page might +** be pulled from the freelist and reused for something different +** and at that point the DontRollback() API will be called because +** pages taken from the freelist do not need to be protected by +** the rollback journal. But this flag says that the page was +** not originally part of the freelist so that it still needs to +** be rolled back in spite of any subsequent DontRollback() calls. +** +** needRead +** +** This flag means (when true) that the content of the page has +** not yet been loaded from disk. The in-memory content is just +** garbage. (Actually, we zero the content, but you should not +** make any assumptions about the content nevertheless.) If the +** content is needed in the future, it should be read from the +** original database file. 
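+**
+** Written out in full, the inJournal relationship above is
+**
+**   inJournal = (pPager->aInJournal[(pgno-1)/8] & (1<<((pgno-1)%8)))!=0
+**
+** i.e. bit (pgno-1) of the aInJournal bitvec records whether the
+** original content of page pgno has already been written to the
+** main rollback journal.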
+*/ +struct PgHdr { + Pager *pPager; /* The pager to which this page belongs */ + Pgno pgno; /* The page number for this page */ + PgHdr *pNextHash, *pPrevHash; /* Hash collision chain for PgHdr.pgno */ + PagerLruLink free; /* Next and previous free pages */ + PgHdr *pNextAll; /* A list of all pages */ + u8 inJournal; /* TRUE if has been written to journal */ + u8 dirty; /* TRUE if we need to write back changes */ + u8 needSync; /* Sync journal before writing this page */ + u8 alwaysRollback; /* Disable DontRollback() for this page */ + u8 needRead; /* Read content if PagerWrite() is called */ + short int nRef; /* Number of users of this page */ + PgHdr *pDirty, *pPrevDirty; /* Dirty pages */ +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT + PagerLruLink gfree; /* Global list of nRef==0 pages */ +#endif +#ifdef SQLITE_CHECK_PAGES + u32 pageHash; +#endif + void *pData; /* Page data */ + /* Pager.nExtra bytes of local data appended to this header */ +}; + +/* +** For an in-memory only database, some extra information is recorded about +** each page so that changes can be rolled back. (Journal files are not +** used for in-memory databases.) The following information is added to +** the end of every EXTRA block for in-memory databases. +** +** This information could have been added directly to the PgHdr structure. +** But then it would take up an extra 8 bytes of storage on every PgHdr +** even for disk-based databases. Splitting it out saves 8 bytes. This +** is only a savings of 0.8% but those percentages add up. +*/ +typedef struct PgHistory PgHistory; +struct PgHistory { + u8 *pOrig; /* Original page text. Restore to this on a full rollback */ + u8 *pStmt; /* Text as it was at the beginning of the current statement */ + PgHdr *pNextStmt, *pPrevStmt; /* List of pages in the statement journal */ + u8 inStmt; /* TRUE if in the statement subjournal */ +}; + +/* +** A macro used for invoking the codec if there is one +*/ +#ifdef SQLITE_HAS_CODEC +# define CODEC1(P,D,N,X) if( P->xCodec!=0 ){ P->xCodec(P->pCodecArg,D,N,X); } +# define CODEC2(P,D,N,X) ((char*)(P->xCodec!=0?P->xCodec(P->pCodecArg,D,N,X):D)) +#else +# define CODEC1(P,D,N,X) /* NO-OP */ +# define CODEC2(P,D,N,X) ((char*)D) +#endif + +/* +** Convert a pointer to a PgHdr into a pointer to its data +** and back again. +*/ +#define PGHDR_TO_DATA(P) ((P)->pData) +#define PGHDR_TO_EXTRA(G,P) ((void*)&((G)[1])) +#define PGHDR_TO_HIST(P,PGR) \ + ((PgHistory*)&((char*)(&(P)[1]))[(PGR)->nExtra]) + +/* +** A open page cache is an instance of the following structure. +** +** Pager.errCode may be set to SQLITE_IOERR, SQLITE_CORRUPT, or +** or SQLITE_FULL. Once one of the first three errors occurs, it persists +** and is returned as the result of every major pager API call. The +** SQLITE_FULL return code is slightly different. It persists only until the +** next successful rollback is performed on the pager cache. Also, +** SQLITE_FULL does not affect the sqlite3PagerGet() and sqlite3PagerLookup() +** APIs, they may still be used successfully. 
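+**
+** For example (a sketch of the intended behaviour, not an exhaustive
+** list): if an xWrite() call fails with SQLITE_IOERR_WRITE during a
+** commit, that code is stored in Pager.errCode and later calls such as
+** sqlite3PagerWrite() or sqlite3PagerCommitPhaseOne() return it unchanged
+** until the cache contents are discarded by pager_unlock(). An
+** SQLITE_FULL error, by contrast, is forgotten after the next successful
+** rollback.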
+*/ +struct Pager { + sqlite3_vfs *pVfs; /* OS functions to use for IO */ + u8 journalOpen; /* True if journal file descriptors is valid */ + u8 journalStarted; /* True if header of journal is synced */ + u8 useJournal; /* Use a rollback journal on this file */ + u8 noReadlock; /* Do not bother to obtain readlocks */ + u8 stmtOpen; /* True if the statement subjournal is open */ + u8 stmtInUse; /* True we are in a statement subtransaction */ + u8 stmtAutoopen; /* Open stmt journal when main journal is opened*/ + u8 noSync; /* Do not sync the journal if true */ + u8 fullSync; /* Do extra syncs of the journal for robustness */ + u8 sync_flags; /* One of SYNC_NORMAL or SYNC_FULL */ + u8 state; /* PAGER_UNLOCK, _SHARED, _RESERVED, etc. */ + u8 tempFile; /* zFilename is a temporary file */ + u8 readOnly; /* True for a read-only database */ + u8 needSync; /* True if an fsync() is needed on the journal */ + u8 dirtyCache; /* True if cached pages have changed */ + u8 alwaysRollback; /* Disable DontRollback() for all pages */ + u8 memDb; /* True to inhibit all file I/O */ + u8 setMaster; /* True if a m-j name has been written to jrnl */ + u8 doNotSync; /* Boolean. While true, do not spill the cache */ + u8 exclusiveMode; /* Boolean. True if locking_mode==EXCLUSIVE */ + u8 changeCountDone; /* Set after incrementing the change-counter */ + u32 vfsFlags; /* Flags for sqlite3_vfs.xOpen() */ + int errCode; /* One of several kinds of errors */ + int dbSize; /* Number of pages in the file */ + int origDbSize; /* dbSize before the current change */ + int stmtSize; /* Size of database (in pages) at stmt_begin() */ + int nRec; /* Number of pages written to the journal */ + u32 cksumInit; /* Quasi-random value added to every checksum */ + int stmtNRec; /* Number of records in stmt subjournal */ + int nExtra; /* Add this many bytes to each in-memory page */ + int pageSize; /* Number of bytes in a page */ + int nPage; /* Total number of in-memory pages */ + int nRef; /* Number of in-memory pages with PgHdr.nRef>0 */ + int mxPage; /* Maximum number of pages to hold in cache */ + Pgno mxPgno; /* Maximum allowed size of the database */ + u8 *aInJournal; /* One bit for each page in the database file */ + u8 *aInStmt; /* One bit for each page in the database */ + char *zFilename; /* Name of the database file */ + char *zJournal; /* Name of the journal file */ + char *zDirectory; /* Directory hold database and journal files */ + char *zStmtJrnl; /* Name of the statement journal file */ + sqlite3_file *fd, *jfd; /* File descriptors for database and journal */ + sqlite3_file *stfd; /* File descriptor for the statement subjournal*/ + BusyHandler *pBusyHandler; /* Pointer to sqlite.busyHandler */ + PagerLruList lru; /* LRU list of free pages */ + PgHdr *pAll; /* List of all pages */ + PgHdr *pStmt; /* List of pages in the statement subjournal */ + PgHdr *pDirty; /* List of all dirty pages */ + i64 journalOff; /* Current byte offset in the journal file */ + i64 journalHdr; /* Byte offset to previous journal header */ + i64 stmtHdrOff; /* First journal header written this statement */ + i64 stmtCksum; /* cksumInit when statement was started */ + i64 stmtJSize; /* Size of journal at stmt_begin() */ + int sectorSize; /* Assumed sector size during rollback */ +#ifdef SQLITE_TEST + int nHit, nMiss; /* Cache hits and missing */ + int nRead, nWrite; /* Database pages read/written */ +#endif + void (*xDestructor)(DbPage*,int); /* Call this routine when freeing pages */ + void (*xReiniter)(DbPage*,int); /* Call this routine when 
reloading pages */ +#ifdef SQLITE_HAS_CODEC + void *(*xCodec)(void*,void*,Pgno,int); /* Routine for en/decoding data */ + void *pCodecArg; /* First argument to xCodec() */ +#endif + int nHash; /* Size of the pager hash table */ + PgHdr **aHash; /* Hash table to map page number to PgHdr */ +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT + Pager *pNext; /* Doubly linked list of pagers on which */ + Pager *pPrev; /* sqlite3_release_memory() will work */ + int iInUseMM; /* Non-zero if unavailable to MM */ + int iInUseDB; /* Non-zero if in sqlite3_release_memory() */ +#endif + char *pTmpSpace; /* Pager.pageSize bytes of space for tmp use */ + char dbFileVers[16]; /* Changes whenever database file changes */ +}; + +/* +** The following global variables hold counters used for +** testing purposes only. These variables do not exist in +** a non-testing build. These variables are not thread-safe. +*/ +#ifdef SQLITE_TEST +int sqlite3_pager_readdb_count = 0; /* Number of full pages read from DB */ +int sqlite3_pager_writedb_count = 0; /* Number of full pages written to DB */ +int sqlite3_pager_writej_count = 0; /* Number of pages written to journal */ +int sqlite3_pager_pgfree_count = 0; /* Number of cache pages freed */ +# define PAGER_INCR(v) v++ +#else +# define PAGER_INCR(v) +#endif + +/* +** The following variable points to the head of a double-linked list +** of all pagers that are eligible for page stealing by the +** sqlite3_release_memory() interface. Access to this list is +** protected by the SQLITE_MUTEX_STATIC_MEM2 mutex. +*/ +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT +static Pager *sqlite3PagerList = 0; +static PagerLruList sqlite3LruPageList = {0, 0, 0}; +#endif + + +/* +** Journal files begin with the following magic string. The data +** was obtained from /dev/random. It is used only as a sanity check. +** +** Since version 2.8.0, the journal format contains additional sanity +** checking information. If the power fails while the journal is begin +** written, semi-random garbage data might appear in the journal +** file after power is restored. If an attempt is then made +** to roll the journal back, the database could be corrupted. The additional +** sanity checking data is an attempt to discover the garbage in the +** journal and ignore it. +** +** The sanity checking information for the new journal format consists +** of a 32-bit checksum on each page of data. The checksum covers both +** the page number and the pPager->pageSize bytes of data for the page. +** This cksum is initialized to a 32-bit random value that appears in the +** journal file right after the header. The random initializer is important, +** because garbage data that appears at the end of a journal is likely +** data that was once in other files that have now been deleted. If the +** garbage data came from an obsolete journal file, the checksums might +** be correct. But by initializing the checksum to random value which +** is different for every journal, we minimize that risk. +*/ +static const unsigned char aJournalMagic[] = { + 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd7, +}; + +/* +** The size of the header and of each page in the journal is determined +** by the following macros. +*/ +#define JOURNAL_PG_SZ(pPager) ((pPager->pageSize) + 8) + +/* +** The journal header size for this pager. In the future, this could be +** set to some value read from the disk controller. The important +** characteristic is that it is the same size as a disk sector. 
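+**
+** For example, with the common defaults of a 512-byte sector and a
+** 1024-byte page, each journal header occupies JOURNAL_HDR_SZ == 512
+** bytes and each page record occupies JOURNAL_PG_SZ == 1024+8 == 1032
+** bytes (a 4-byte page number, the page data, and a 4-byte checksum).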
+*/ +#define JOURNAL_HDR_SZ(pPager) (pPager->sectorSize) + +/* +** The macro MEMDB is true if we are dealing with an in-memory database. +** We do this as a macro so that if the SQLITE_OMIT_MEMORYDB macro is set, +** the value of MEMDB will be a constant and the compiler will optimize +** out code that would never execute. +*/ +#ifdef SQLITE_OMIT_MEMORYDB +# define MEMDB 0 +#else +# define MEMDB pPager->memDb +#endif + +/* +** Page number PAGER_MJ_PGNO is never used in an SQLite database (it is +** reserved for working around a windows/posix incompatibility). It is +** used in the journal to signify that the remainder of the journal file +** is devoted to storing a master journal name - there are no more pages to +** roll back. See comments for function writeMasterJournal() for details. +*/ +/* #define PAGER_MJ_PGNO(x) (PENDING_BYTE/((x)->pageSize)) */ +#define PAGER_MJ_PGNO(x) ((PENDING_BYTE/((x)->pageSize))+1) + +/* +** The maximum legal page number is (2^31 - 1). +*/ +#define PAGER_MAX_PGNO 2147483647 + +/* +** The pagerEnter() and pagerLeave() routines acquire and release +** a mutex on each pager. The mutex is recursive. +** +** This is a special-purpose mutex. It only provides mutual exclusion +** between the Btree and the Memory Management sqlite3_release_memory() +** function. It does not prevent, for example, two Btrees from accessing +** the same pager at the same time. Other general-purpose mutexes in +** the btree layer handle that chore. +*/ +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT + static void pagerEnter(Pager *p){ + p->iInUseDB++; + if( p->iInUseMM && p->iInUseDB==1 ){ + sqlite3_mutex *mutex; + mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MEM2); + p->iInUseDB = 0; + sqlite3_mutex_enter(mutex); + p->iInUseDB = 1; + sqlite3_mutex_leave(mutex); + } + assert( p->iInUseMM==0 ); + } + static void pagerLeave(Pager *p){ + p->iInUseDB--; + assert( p->iInUseDB>=0 ); + } +#else +# define pagerEnter(X) +# define pagerLeave(X) +#endif + +/* +** Enable reference count tracking (for debugging) here: +*/ +#ifdef SQLITE_DEBUG + int pager3_refinfo_enable = 0; + static void pager_refinfo(PgHdr *p){ + static int cnt = 0; + if( !pager3_refinfo_enable ) return; + sqlite3DebugPrintf( + "REFCNT: %4d addr=%p nRef=%-3d total=%d\n", + p->pgno, PGHDR_TO_DATA(p), p->nRef, p->pPager->nRef + ); + cnt++; /* Something to set a breakpoint on */ + } +# define REFINFO(X) pager_refinfo(X) +#else +# define REFINFO(X) +#endif + +/* +** Add page pPg to the end of the linked list managed by structure +** pList (pPg becomes the last entry in the list - the most recently +** used). Argument pLink should point to either pPg->free or pPg->gfree, +** depending on whether pPg is being added to the pager-specific or +** global LRU list. +*/ +static void listAdd(PagerLruList *pList, PagerLruLink *pLink, PgHdr *pPg){ + pLink->pNext = 0; + pLink->pPrev = pList->pLast; + +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT + assert(pLink==&pPg->free || pLink==&pPg->gfree); + assert(pLink==&pPg->gfree || pList!=&sqlite3LruPageList); +#endif + + if( pList->pLast ){ + int iOff = (char *)pLink - (char *)pPg; + PagerLruLink *pLastLink = (PagerLruLink *)(&((u8 *)pList->pLast)[iOff]); + pLastLink->pNext = pPg; + }else{ + assert(!pList->pFirst); + pList->pFirst = pPg; + } + + pList->pLast = pPg; + if( !pList->pFirstSynced && pPg->needSync==0 ){ + pList->pFirstSynced = pPg; + } +} + +/* +** Remove pPg from the list managed by the structure pointed to by pList. 
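+**
+** Like listAdd() above, this routine finds the PagerLruLink embedded in a
+** neighbouring PgHdr purely by byte offset, so the same code serves both
+** the pager-local list (PgHdr.free) and the global list (PgHdr.gfree).
+** A sketch of the idea:
+**
+**    iOff      = (char*)pLink - (char*)pPg;
+**    pPrevLink = (PagerLruLink*)((u8*)pLink->pPrev + iOff);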
+** +** Argument pLink should point to either pPg->free or pPg->gfree, depending +** on whether pPg is being added to the pager-specific or global LRU list. +*/ +static void listRemove(PagerLruList *pList, PagerLruLink *pLink, PgHdr *pPg){ + int iOff = (char *)pLink - (char *)pPg; + +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT + assert(pLink==&pPg->free || pLink==&pPg->gfree); + assert(pLink==&pPg->gfree || pList!=&sqlite3LruPageList); +#endif + + if( pPg==pList->pFirst ){ + pList->pFirst = pLink->pNext; + } + if( pPg==pList->pLast ){ + pList->pLast = pLink->pPrev; + } + if( pLink->pPrev ){ + PagerLruLink *pPrevLink = (PagerLruLink *)(&((u8 *)pLink->pPrev)[iOff]); + pPrevLink->pNext = pLink->pNext; + } + if( pLink->pNext ){ + PagerLruLink *pNextLink = (PagerLruLink *)(&((u8 *)pLink->pNext)[iOff]); + pNextLink->pPrev = pLink->pPrev; + } + if( pPg==pList->pFirstSynced ){ + PgHdr *p = pLink->pNext; + while( p && p->needSync ){ + PagerLruLink *pL = (PagerLruLink *)(&((u8 *)p)[iOff]); + p = pL->pNext; + } + pList->pFirstSynced = p; + } + + pLink->pNext = pLink->pPrev = 0; +} + +/* +** Add page pPg to the list of free pages for the pager. If +** memory-management is enabled, also add the page to the global +** list of free pages. +*/ +static void lruListAdd(PgHdr *pPg){ + listAdd(&pPg->pPager->lru, &pPg->free, pPg); +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT + if( !pPg->pPager->memDb ){ + sqlite3_mutex_enter(sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_LRU)); + listAdd(&sqlite3LruPageList, &pPg->gfree, pPg); + sqlite3_mutex_leave(sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_LRU)); + } +#endif +} + +/* +** Remove page pPg from the list of free pages for the associated pager. +** If memory-management is enabled, also remove pPg from the global list +** of free pages. +*/ +static void lruListRemove(PgHdr *pPg){ + listRemove(&pPg->pPager->lru, &pPg->free, pPg); +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT + if( !pPg->pPager->memDb ){ + sqlite3_mutex_enter(sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_LRU)); + listRemove(&sqlite3LruPageList, &pPg->gfree, pPg); + sqlite3_mutex_leave(sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_LRU)); + } +#endif +} + +/* +** This function is called just after the needSync flag has been cleared +** from all pages managed by pPager (usually because the journal file +** has just been synced). It updates the pPager->lru.pFirstSynced variable +** and, if memory-management is enabled, the sqlite3LruPageList.pFirstSynced +** variable also. +*/ +static void lruListSetFirstSynced(Pager *pPager){ + pPager->lru.pFirstSynced = pPager->lru.pFirst; +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT + if( !pPager->memDb ){ + PgHdr *p; + sqlite3_mutex_enter(sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_LRU)); + for(p=sqlite3LruPageList.pFirst; p && p->needSync; p=p->gfree.pNext); + assert(p==pPager->lru.pFirstSynced || p==sqlite3LruPageList.pFirstSynced); + sqlite3LruPageList.pFirstSynced = p; + sqlite3_mutex_leave(sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_LRU)); + } +#endif +} + +/* +** Return true if page *pPg has already been written to the statement +** journal (or statement snapshot has been created, if *pPg is part +** of an in-memory database). +*/ +static int pageInStatement(PgHdr *pPg){ + Pager *pPager = pPg->pPager; + if( MEMDB ){ + return PGHDR_TO_HIST(pPg, pPager)->inStmt; + }else{ + Pgno pgno = pPg->pgno; + u8 *a = pPager->aInStmt; + return (a && (int)pgno<=pPager->stmtSize && (a[pgno/8] & (1<<(pgno&7)))); + } +} + +/* +** Change the size of the pager hash table to N. N must be a power +** of two. 
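+**
+** Pages are mapped to buckets by masking the page number with N-1; for
+** example, with N==256 page 1000 lands in bucket (1000 & 255)==232.
+** Because the mask changes whenever N changes, every existing page is
+** re-hashed into the new table below.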
+*/ +static void pager_resize_hash_table(Pager *pPager, int N){ + PgHdr **aHash, *pPg; + assert( N>0 && (N&(N-1))==0 ); + pagerLeave(pPager); + sqlite3MallocBenignFailure((int)pPager->aHash); + aHash = sqlite3MallocZero( sizeof(aHash[0])*N ); + pagerEnter(pPager); + if( aHash==0 ){ + /* Failure to rehash is not an error. It is only a performance hit. */ + return; + } + sqlite3_free(pPager->aHash); + pPager->nHash = N; + pPager->aHash = aHash; + for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){ + int h; + if( pPg->pgno==0 ){ + assert( pPg->pNextHash==0 && pPg->pPrevHash==0 ); + continue; + } + h = pPg->pgno & (N-1); + pPg->pNextHash = aHash[h]; + if( aHash[h] ){ + aHash[h]->pPrevHash = pPg; + } + aHash[h] = pPg; + pPg->pPrevHash = 0; + } +} + +/* +** Read a 32-bit integer from the given file descriptor. Store the integer +** that is read in *pRes. Return SQLITE_OK if everything worked, or an +** error code is something goes wrong. +** +** All values are stored on disk as big-endian. +*/ +static int read32bits(sqlite3_file *fd, i64 offset, u32 *pRes){ + unsigned char ac[4]; + int rc = sqlite3OsRead(fd, ac, sizeof(ac), offset); + if( rc==SQLITE_OK ){ + *pRes = sqlite3Get4byte(ac); + } + return rc; +} + +/* +** Write a 32-bit integer into a string buffer in big-endian byte order. +*/ +#define put32bits(A,B) sqlite3Put4byte((u8*)A,B) + +/* +** Write a 32-bit integer into the given file descriptor. Return SQLITE_OK +** on success or an error code is something goes wrong. +*/ +static int write32bits(sqlite3_file *fd, i64 offset, u32 val){ + char ac[4]; + put32bits(ac, val); + return sqlite3OsWrite(fd, ac, 4, offset); +} + +/* +** If file pFd is open, call sqlite3OsUnlock() on it. +*/ +static int osUnlock(sqlite3_file *pFd, int eLock){ + if( !pFd->pMethods ){ + return SQLITE_OK; + } + return sqlite3OsUnlock(pFd, eLock); +} + +/* +** This function determines whether or not the atomic-write optimization +** can be used with this pager. The optimization can be used if: +** +** (a) the value returned by OsDeviceCharacteristics() indicates that +** a database page may be written atomically, and +** (b) the value returned by OsSectorSize() is less than or equal +** to the page size. +** +** If the optimization cannot be used, 0 is returned. If it can be used, +** then the value returned is the size of the journal file when it +** contains rollback data for exactly one page. +*/ +#ifdef SQLITE_ENABLE_ATOMIC_WRITE +static int jrnlBufferSize(Pager *pPager){ + int dc; /* Device characteristics */ + int nSector; /* Sector size */ + int nPage; /* Page size */ + sqlite3_file *fd = pPager->fd; + + if( fd->pMethods ){ + dc = sqlite3OsDeviceCharacteristics(fd); + nSector = sqlite3OsSectorSize(fd); + nPage = pPager->pageSize; + } + + assert(SQLITE_IOCAP_ATOMIC512==(512>>8)); + assert(SQLITE_IOCAP_ATOMIC64K==(65536>>8)); + + if( !fd->pMethods || (dc&(SQLITE_IOCAP_ATOMIC|(nPage>>8))&&nSector<=nPage) ){ + return JOURNAL_HDR_SZ(pPager) + JOURNAL_PG_SZ(pPager); + } + return 0; +} +#endif + +/* +** This function should be called when an error occurs within the pager +** code. The first argument is a pointer to the pager structure, the +** second the error-code about to be returned by a pager API function. +** The value returned is a copy of the second argument to this function. +** +** If the second argument is SQLITE_IOERR, SQLITE_CORRUPT, or SQLITE_FULL +** the error becomes persistent. Until the persisten error is cleared, +** subsequent API calls on this Pager will immediately return the same +** error code. 
+**
+** A persistent error indicates that the contents of the pager-cache
+** cannot be trusted. This state can be cleared by completely discarding
+** the contents of the pager-cache. If a transaction was active when
+** the persistent error occurred, then the rollback journal may need
+** to be replayed.
+*/
+static void pager_unlock(Pager *pPager);
+static int pager_error(Pager *pPager, int rc){
+ int rc2 = rc & 0xff;
+ assert(
+ pPager->errCode==SQLITE_FULL ||
+ pPager->errCode==SQLITE_OK ||
+ (pPager->errCode & 0xff)==SQLITE_IOERR
+ );
+ if(
+ rc2==SQLITE_FULL ||
+ rc2==SQLITE_IOERR ||
+ rc2==SQLITE_CORRUPT
+ ){
+ pPager->errCode = rc;
+ if( pPager->state==PAGER_UNLOCK && pPager->nRef==0 ){
+ /* If the pager is already unlocked, call pager_unlock() now to
+ ** clear the error state and ensure that the pager-cache is
+ ** completely empty.
+ */
+ pager_unlock(pPager);
+ }
+ }
+ return rc;
+}
+
+/*
+** If SQLITE_CHECK_PAGES is defined then we do some sanity checking
+** on the cache using a hash function. This is used for testing
+** and debugging only.
+*/
+#ifdef SQLITE_CHECK_PAGES
+/*
+** Return a 32-bit hash of the page data for pPage.
+*/
+static u32 pager_datahash(int nByte, unsigned char *pData){
+ u32 hash = 0;
+ int i;
+ for(i=0; i<nByte; i++){
+ hash = (hash*1039) + pData[i];
+ }
+ return hash;
+}
+static u32 pager_pagehash(PgHdr *pPage){
+ return pager_datahash(pPage->pPager->pageSize,
+ (unsigned char *)PGHDR_TO_DATA(pPage));
+}
+
+/*
+** The CHECK_PAGE macro takes a PgHdr* as an argument. If SQLITE_CHECK_PAGES
+** is defined, and NDEBUG is not defined, an assert() statement checks
+** that the page is either dirty or still matches the calculated page-hash.
+*/
+#define CHECK_PAGE(x) checkPage(x)
+static void checkPage(PgHdr *pPg){
+ Pager *pPager = pPg->pPager;
+ assert( !pPg->pageHash || pPager->errCode || MEMDB || pPg->dirty ||
+ pPg->pageHash==pager_pagehash(pPg) );
+}
+
+#else
+#define pager_datahash(X,Y) 0
+#define pager_pagehash(X) 0
+#define CHECK_PAGE(x)
+#endif
+
+/*
+** When this is called the journal file for pager pPager must be open.
+** The master journal file name is read from the end of the file and
+** written into memory supplied by the caller.
+**
+** zMaster must point to a buffer of at least nMaster bytes allocated by
+** the caller. This should be sqlite3_vfs.mxPathname+1 (to ensure there is
+** enough space to write the master journal name). If the master journal
+** name in the journal is longer than nMaster bytes (including a
+** nul-terminator), then this is handled as if no master journal name
+** were present in the journal.
+**
+** If no master journal file name is present, zMaster[0] is set to 0 and
+** SQLITE_OK is returned.
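+**
+** The name is stored at the very end of the journal file. Reading
+** backwards from the end (a sketch, assuming szJ==1000 and a 20-byte
+** master journal name):
+**
+**    bytes 964..983   master journal name   (offset szJ-16-len)
+**    bytes 984..987   len, here 20          (offset szJ-16)
+**    bytes 988..991   name checksum         (offset szJ-12)
+**    bytes 992..999   aJournalMagic[]       (offset szJ-8)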
+*/
+static int readMasterJournal(sqlite3_file *pJrnl, char *zMaster, int nMaster){
+ int rc;
+ u32 len;
+ i64 szJ;
+ u32 cksum;
+ int i;
+ unsigned char aMagic[8]; /* A buffer to hold the magic header */
+
+ zMaster[0] = '\0';
+
+ rc = sqlite3OsFileSize(pJrnl, &szJ);
+ if( rc!=SQLITE_OK || szJ<16 ) return rc;
+
+ rc = read32bits(pJrnl, szJ-16, &len);
+ if( rc!=SQLITE_OK ) return rc;
+
+ if( len>=nMaster ){
+ return SQLITE_OK;
+ }
+
+ rc = read32bits(pJrnl, szJ-12, &cksum);
+ if( rc!=SQLITE_OK ) return rc;
+
+ rc = sqlite3OsRead(pJrnl, aMagic, 8, szJ-8);
+ if( rc!=SQLITE_OK || memcmp(aMagic, aJournalMagic, 8) ) return rc;
+
+ rc = sqlite3OsRead(pJrnl, zMaster, len, szJ-16-len);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ zMaster[len] = '\0';
+
+ /* See if the checksum matches the master journal name */
+ for(i=0; i<len; i++){
+ cksum -= zMaster[i];
+ }
+ if( cksum ){
+ /* If the checksum does not add up, then one or more of the disk sectors
+ ** containing the master journal filename is corrupted. This means
+ ** definitely roll back, so just return SQLITE_OK and report a (nul)
+ ** master-journal filename.
+ */
+ zMaster[0] = '\0';
+ }
+
+ return SQLITE_OK;
+}
+
+/*
+** Seek the journal file descriptor to the next sector boundary where a
+** journal header may be read or written. Pager.journalOff is updated with
+** the new seek offset.
+**
+** For example, with a sector size of 512:
+**
+** Input Offset    Output Offset
+** -----------------------------
+** 0               0
+** 512             512
+** 100             512
+** 2000            2048
+*/
+static void seekJournalHdr(Pager *pPager){
+ i64 offset = 0;
+ i64 c = pPager->journalOff;
+ if( c ){
+ offset = ((c-1)/JOURNAL_HDR_SZ(pPager) + 1) * JOURNAL_HDR_SZ(pPager);
+ }
+ assert( offset%JOURNAL_HDR_SZ(pPager)==0 );
+ assert( offset>=c );
+ assert( (offset-c)<JOURNAL_HDR_SZ(pPager) );
+ pPager->journalOff = offset;
+}
+
+/*
+** The journal file must be open when this routine is called. A journal
+** header (JOURNAL_HDR_SZ bytes) is written into the journal file at the
+** current location.
+**
+** The format for the journal header is as follows:
+** - 8 bytes: Magic identifying journal format.
+** - 4 bytes: Number of records in journal, or -1 if no-sync mode is on.
+** - 4 bytes: Random number used for page hash.
+** - 4 bytes: Initial database page count.
+** - 4 bytes: Sector size used by the process that wrote this journal.
+**
+** Followed by (JOURNAL_HDR_SZ - 24) bytes of unused space.
+*/
+static int writeJournalHdr(Pager *pPager){
+ char zHeader[sizeof(aJournalMagic)+16];
+ int rc;
+
+ if( pPager->stmtHdrOff==0 ){
+ pPager->stmtHdrOff = pPager->journalOff;
+ }
+
+ seekJournalHdr(pPager);
+ pPager->journalHdr = pPager->journalOff;
+
+ memcpy(zHeader, aJournalMagic, sizeof(aJournalMagic));
+
+ /*
+ ** Write the nRec Field - the number of page records that follow this
+ ** journal header. Normally, zero is written to this value at this time.
+ ** After the records are added to the journal (and the journal synced,
+ ** if in full-sync mode), the zero is overwritten with the true number
+ ** of records (see syncJournal()).
+ **
+ ** A faster alternative is to write 0xFFFFFFFF to the nRec field. When
+ ** reading the journal this value tells SQLite to assume that the
+ ** rest of the journal file contains valid page records. This assumption
+ ** is dangerous, as if a failure occurred whilst writing to the journal
+ ** file it may contain some garbage data. There are two scenarios
+ ** where this risk can be ignored:
+ **
+ ** * When the pager is in no-sync mode. Corruption can follow a
+ ** power failure in this case anyway.
+ **
+ ** * When the SQLITE_IOCAP_SAFE_APPEND flag is set. This guarantees
+ ** that garbage data is never appended to the journal file.
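+ **
+ ** As a sketch of the arithmetic involved (assuming a 512-byte sector
+ ** and 1024-byte pages): a synced journal segment holding nRec records
+ ** occupies 512 + nRec*1032 bytes, and when 0xFFFFFFFF is written here
+ ** playback instead derives the count from the file size, roughly
+ ** nRec = (szJ - JOURNAL_HDR_SZ)/JOURNAL_PG_SZ.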
+ */ + assert(pPager->fd->pMethods||pPager->noSync); + if( (pPager->noSync) + || (sqlite3OsDeviceCharacteristics(pPager->fd)&SQLITE_IOCAP_SAFE_APPEND) + ){ + put32bits(&zHeader[sizeof(aJournalMagic)], 0xffffffff); + }else{ + put32bits(&zHeader[sizeof(aJournalMagic)], 0); + } + + /* The random check-hash initialiser */ + sqlite3Randomness(sizeof(pPager->cksumInit), &pPager->cksumInit); + put32bits(&zHeader[sizeof(aJournalMagic)+4], pPager->cksumInit); + /* The initial database size */ + put32bits(&zHeader[sizeof(aJournalMagic)+8], pPager->dbSize); + /* The assumed sector size for this process */ + put32bits(&zHeader[sizeof(aJournalMagic)+12], pPager->sectorSize); + IOTRACE(("JHDR %p %lld %d\n", pPager, pPager->journalHdr, sizeof(zHeader))) + rc = sqlite3OsWrite(pPager->jfd, zHeader, sizeof(zHeader),pPager->journalOff); + pPager->journalOff += JOURNAL_HDR_SZ(pPager); + + /* The journal header has been written successfully. Seek the journal + ** file descriptor to the end of the journal header sector. + */ + if( rc==SQLITE_OK ){ + IOTRACE(("JTAIL %p %lld\n", pPager, pPager->journalOff-1)) + rc = sqlite3OsWrite(pPager->jfd, "\000", 1, pPager->journalOff-1); + } + return rc; +} + +/* +** The journal file must be open when this is called. A journal header file +** (JOURNAL_HDR_SZ bytes) is read from the current location in the journal +** file. See comments above function writeJournalHdr() for a description of +** the journal header format. +** +** If the header is read successfully, *nRec is set to the number of +** page records following this header and *dbSize is set to the size of the +** database before the transaction began, in pages. Also, pPager->cksumInit +** is set to the value read from the journal header. SQLITE_OK is returned +** in this case. +** +** If the journal header file appears to be corrupted, SQLITE_DONE is +** returned and *nRec and *dbSize are not set. If JOURNAL_HDR_SZ bytes +** cannot be read from the journal file an error code is returned. +*/ +static int readJournalHdr( + Pager *pPager, + i64 journalSize, + u32 *pNRec, + u32 *pDbSize +){ + int rc; + unsigned char aMagic[8]; /* A buffer to hold the magic header */ + i64 jrnlOff; + + seekJournalHdr(pPager); + if( pPager->journalOff+JOURNAL_HDR_SZ(pPager) > journalSize ){ + return SQLITE_DONE; + } + jrnlOff = pPager->journalOff; + + rc = sqlite3OsRead(pPager->jfd, aMagic, sizeof(aMagic), jrnlOff); + if( rc ) return rc; + jrnlOff += sizeof(aMagic); + + if( memcmp(aMagic, aJournalMagic, sizeof(aMagic))!=0 ){ + return SQLITE_DONE; + } + + rc = read32bits(pPager->jfd, jrnlOff, pNRec); + if( rc ) return rc; + + rc = read32bits(pPager->jfd, jrnlOff+4, &pPager->cksumInit); + if( rc ) return rc; + + rc = read32bits(pPager->jfd, jrnlOff+8, pDbSize); + if( rc ) return rc; + + /* Update the assumed sector-size to match the value used by + ** the process that created this journal. If this journal was + ** created by a process other than this one, then this routine + ** is being called from within pager_playback(). The local value + ** of Pager.sectorSize is restored at the end of that routine. + */ + rc = read32bits(pPager->jfd, jrnlOff+12, (u32 *)&pPager->sectorSize); + if( rc ) return rc; + + pPager->journalOff += JOURNAL_HDR_SZ(pPager); + return SQLITE_OK; +} + + +/* +** Write the supplied master journal name into the journal file for pager +** pPager at the current location. The master journal name must be the last +** thing written to a journal file. 
If the pager is in full-sync mode, the +** journal file descriptor is advanced to the next sector boundary before +** anything is written. The format is: +** +** + 4 bytes: PAGER_MJ_PGNO. +** + N bytes: length of master journal name. +** + 4 bytes: N +** + 4 bytes: Master journal name checksum. +** + 8 bytes: aJournalMagic[]. +** +** The master journal page checksum is the sum of the bytes in the master +** journal name. +** +** If zMaster is a NULL pointer (occurs for a single database transaction), +** this call is a no-op. +*/ +static int writeMasterJournal(Pager *pPager, const char *zMaster){ + int rc; + int len; + int i; + i64 jrnlOff; + u32 cksum = 0; + char zBuf[sizeof(aJournalMagic)+2*4]; + + if( !zMaster || pPager->setMaster) return SQLITE_OK; + pPager->setMaster = 1; + + len = strlen(zMaster); + for(i=0; ifullSync ){ + seekJournalHdr(pPager); + } + jrnlOff = pPager->journalOff; + pPager->journalOff += (len+20); + + rc = write32bits(pPager->jfd, jrnlOff, PAGER_MJ_PGNO(pPager)); + if( rc!=SQLITE_OK ) return rc; + jrnlOff += 4; + + rc = sqlite3OsWrite(pPager->jfd, zMaster, len, jrnlOff); + if( rc!=SQLITE_OK ) return rc; + jrnlOff += len; + + put32bits(zBuf, len); + put32bits(&zBuf[4], cksum); + memcpy(&zBuf[8], aJournalMagic, sizeof(aJournalMagic)); + rc = sqlite3OsWrite(pPager->jfd, zBuf, 8+sizeof(aJournalMagic), jrnlOff); + pPager->needSync = !pPager->noSync; + return rc; +} + +/* +** Add or remove a page from the list of all pages that are in the +** statement journal. +** +** The Pager keeps a separate list of pages that are currently in +** the statement journal. This helps the sqlite3PagerStmtCommit() +** routine run MUCH faster for the common case where there are many +** pages in memory but only a few are in the statement journal. +*/ +static void page_add_to_stmt_list(PgHdr *pPg){ + Pager *pPager = pPg->pPager; + PgHistory *pHist = PGHDR_TO_HIST(pPg, pPager); + assert( MEMDB ); + if( !pHist->inStmt ){ + assert( pHist->pPrevStmt==0 && pHist->pNextStmt==0 ); + if( pPager->pStmt ){ + PGHDR_TO_HIST(pPager->pStmt, pPager)->pPrevStmt = pPg; + } + pHist->pNextStmt = pPager->pStmt; + pPager->pStmt = pPg; + pHist->inStmt = 1; + } +} + +/* +** Find a page in the hash table given its page number. Return +** a pointer to the page or NULL if not found. +*/ +static PgHdr *pager_lookup(Pager *pPager, Pgno pgno){ + PgHdr *p; + if( pPager->aHash==0 ) return 0; + p = pPager->aHash[pgno & (pPager->nHash-1)]; + while( p && p->pgno!=pgno ){ + p = p->pNextHash; + } + return p; +} + +/* +** Clear the in-memory cache. This routine +** sets the state of the pager back to what it was when it was first +** opened. Any outstanding pages are invalidated and subsequent attempts +** to access those pages will likely result in a coredump. +*/ +static void pager_reset(Pager *pPager){ + PgHdr *pPg, *pNext; + if( pPager->errCode ) return; + for(pPg=pPager->pAll; pPg; pPg=pNext){ + IOTRACE(("PGFREE %p %d\n", pPager, pPg->pgno)); + PAGER_INCR(sqlite3_pager_pgfree_count); + pNext = pPg->pNextAll; + lruListRemove(pPg); + sqlite3_free(pPg->pData); + sqlite3_free(pPg); + } + assert(pPager->lru.pFirst==0); + assert(pPager->lru.pFirstSynced==0); + assert(pPager->lru.pLast==0); + pPager->pStmt = 0; + pPager->pAll = 0; + pPager->pDirty = 0; + pPager->nHash = 0; + sqlite3_free(pPager->aHash); + pPager->nPage = 0; + pPager->aHash = 0; + pPager->nRef = 0; +} + +/* +** Unlock the database file. 
+** +** If the pager is currently in error state, discard the contents of +** the cache and reset the Pager structure internal state. If there is +** an open journal-file, then the next time a shared-lock is obtained +** on the pager file (by this or any other process), it will be +** treated as a hot-journal and rolled back. +*/ +static void pager_unlock(Pager *pPager){ + if( !pPager->exclusiveMode ){ + if( !MEMDB ){ + if( pPager->fd->pMethods ){ + osUnlock(pPager->fd, NO_LOCK); + } + pPager->dbSize = -1; + IOTRACE(("UNLOCK %p\n", pPager)) + + /* If Pager.errCode is set, the contents of the pager cache cannot be + ** trusted. Now that the pager file is unlocked, the contents of the + ** cache can be discarded and the error code safely cleared. + */ + if( pPager->errCode ){ + pPager->errCode = SQLITE_OK; + pager_reset(pPager); + if( pPager->stmtOpen ){ + sqlite3OsClose(pPager->stfd); + sqlite3_free(pPager->aInStmt); + pPager->aInStmt = 0; + } + if( pPager->journalOpen ){ + sqlite3OsClose(pPager->jfd); + pPager->journalOpen = 0; + sqlite3_free(pPager->aInJournal); + pPager->aInJournal = 0; + } + pPager->stmtOpen = 0; + pPager->stmtInUse = 0; + pPager->journalOff = 0; + pPager->journalStarted = 0; + pPager->stmtAutoopen = 0; + pPager->origDbSize = 0; + } + } + + if( !MEMDB || pPager->errCode==SQLITE_OK ){ + pPager->state = PAGER_UNLOCK; + pPager->changeCountDone = 0; + } + } +} + +/* +** Execute a rollback if a transaction is active and unlock the +** database file. If the pager has already entered the error state, +** do not attempt the rollback. +*/ +static void pagerUnlockAndRollback(Pager *p){ + assert( p->state>=PAGER_RESERVED || p->journalOpen==0 ); + if( p->errCode==SQLITE_OK && p->state>=PAGER_RESERVED ){ + sqlite3PagerRollback(p); + } + pager_unlock(p); + assert( p->errCode || !p->journalOpen || (p->exclusiveMode&&!p->journalOff) ); + assert( p->errCode || !p->stmtOpen || p->exclusiveMode ); +} + +/* +** This routine ends a transaction. A transaction is ended by either +** a COMMIT or a ROLLBACK. +** +** When this routine is called, the pager has the journal file open and +** a RESERVED or EXCLUSIVE lock on the database. This routine will release +** the database lock and acquires a SHARED lock in its place if that is +** the appropriate thing to do. Release locks usually is appropriate, +** unless we are in exclusive access mode or unless this is a +** COMMIT AND BEGIN or ROLLBACK AND BEGIN operation. +** +** The journal file is either deleted or truncated. +** +** TODO: Consider keeping the journal file open for temporary databases. +** This might give a performance improvement on windows where opening +** a file is an expensive operation. 
+*/ +static int pager_end_transaction(Pager *pPager){ + PgHdr *pPg; + int rc = SQLITE_OK; + int rc2 = SQLITE_OK; + assert( !MEMDB ); + if( pPager->statestmtOpen && !pPager->exclusiveMode ){ + sqlite3OsClose(pPager->stfd); + pPager->stmtOpen = 0; + } + if( pPager->journalOpen ){ + if( pPager->exclusiveMode + && (rc = sqlite3OsTruncate(pPager->jfd, 0))==SQLITE_OK ){; + pPager->journalOff = 0; + pPager->journalStarted = 0; + }else{ + sqlite3OsClose(pPager->jfd); + pPager->journalOpen = 0; + if( rc==SQLITE_OK ){ + rc = sqlite3OsDelete(pPager->pVfs, pPager->zJournal, 0); + } + } + sqlite3_free( pPager->aInJournal ); + pPager->aInJournal = 0; + for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){ + pPg->inJournal = 0; + pPg->dirty = 0; + pPg->needSync = 0; + pPg->alwaysRollback = 0; +#ifdef SQLITE_CHECK_PAGES + pPg->pageHash = pager_pagehash(pPg); +#endif + } + pPager->pDirty = 0; + pPager->dirtyCache = 0; + pPager->nRec = 0; + }else{ + assert( pPager->aInJournal==0 ); + assert( pPager->dirtyCache==0 || pPager->useJournal==0 ); + } + + if( !pPager->exclusiveMode ){ + rc2 = osUnlock(pPager->fd, SHARED_LOCK); + pPager->state = PAGER_SHARED; + }else if( pPager->state==PAGER_SYNCED ){ + pPager->state = PAGER_EXCLUSIVE; + } + pPager->origDbSize = 0; + pPager->setMaster = 0; + pPager->needSync = 0; + lruListSetFirstSynced(pPager); + pPager->dbSize = -1; + + return (rc==SQLITE_OK?rc2:rc); +} + +/* +** Compute and return a checksum for the page of data. +** +** This is not a real checksum. It is really just the sum of the +** random initial value and the page number. We experimented with +** a checksum of the entire data, but that was found to be too slow. +** +** Note that the page number is stored at the beginning of data and +** the checksum is stored at the end. This is important. If journal +** corruption occurs due to a power failure, the most likely scenario +** is that one end or the other of the record will be changed. It is +** much less likely that the two ends of the journal record will be +** correct and the middle be corrupt. Thus, this "checksum" scheme, +** though fast and simple, catches the mostly likely kind of corruption. +** +** FIX ME: Consider adding every 200th (or so) byte of the data to the +** checksum. That way if a single page spans 3 or more disk sectors and +** only the middle sector is corrupt, we will still have a reasonable +** chance of failing the checksum and thus detecting the problem. +*/ +static u32 pager_cksum(Pager *pPager, const u8 *aData){ + u32 cksum = pPager->cksumInit; + int i = pPager->pageSize-200; + while( i>0 ){ + cksum += aData[i]; + i -= 200; + } + return cksum; +} + +/* Forward declaration */ +static void makeClean(PgHdr*); + +/* +** Read a single page from the journal file opened on file descriptor +** jfd. Playback this one page. +** +** If useCksum==0 it means this journal does not use checksums. Checksums +** are not used in statement journals because statement journals do not +** need to survive power failures. +*/ +static int pager_playback_one_page( + Pager *pPager, + sqlite3_file *jfd, + i64 offset, + int useCksum +){ + int rc; + PgHdr *pPg; /* An existing page in the cache */ + Pgno pgno; /* The page number of a page in journal */ + u32 cksum; /* Checksum used for sanity checking */ + u8 *aData = (u8 *)pPager->pTmpSpace; /* Temp storage for a page */ + + /* useCksum should be true for the main journal and false for + ** statement journals. Verify that this is always the case + */ + assert( jfd == (useCksum ? 
pPager->jfd : pPager->stfd) ); + assert( aData ); + + rc = read32bits(jfd, offset, &pgno); + if( rc!=SQLITE_OK ) return rc; + rc = sqlite3OsRead(jfd, aData, pPager->pageSize, offset+4); + if( rc!=SQLITE_OK ) return rc; + pPager->journalOff += pPager->pageSize + 4; + + /* Sanity checking on the page. This is more important that I originally + ** thought. If a power failure occurs while the journal is being written, + ** it could cause invalid data to be written into the journal. We need to + ** detect this invalid data (with high probability) and ignore it. + */ + if( pgno==0 || pgno==PAGER_MJ_PGNO(pPager) ){ + return SQLITE_DONE; + } + if( pgno>(unsigned)pPager->dbSize ){ + return SQLITE_OK; + } + if( useCksum ){ + rc = read32bits(jfd, offset+pPager->pageSize+4, &cksum); + if( rc ) return rc; + pPager->journalOff += 4; + if( pager_cksum(pPager, aData)!=cksum ){ + return SQLITE_DONE; + } + } + + assert( pPager->state==PAGER_RESERVED || pPager->state>=PAGER_EXCLUSIVE ); + + /* If the pager is in RESERVED state, then there must be a copy of this + ** page in the pager cache. In this case just update the pager cache, + ** not the database file. The page is left marked dirty in this case. + ** + ** An exception to the above rule: If the database is in no-sync mode + ** and a page is moved during an incremental vacuum then the page may + ** not be in the pager cache. Later: if a malloc() or IO error occurs + ** during a Movepage() call, then the page may not be in the cache + ** either. So the condition described in the above paragraph is not + ** assert()able. + ** + ** If in EXCLUSIVE state, then we update the pager cache if it exists + ** and the main file. The page is then marked not dirty. + ** + ** Ticket #1171: The statement journal might contain page content that is + ** different from the page content at the start of the transaction. + ** This occurs when a page is changed prior to the start of a statement + ** then changed again within the statement. When rolling back such a + ** statement we must not write to the original database unless we know + ** for certain that original page contents are synced into the main rollback + ** journal. Otherwise, a power loss might leave modified data in the + ** database file without an entry in the rollback journal that can + ** restore the database to its original form. Two conditions must be + ** met before writing to the database files. (1) the database must be + ** locked. (2) we know that the original page content is fully synced + ** in the main journal either because the page is not in cache or else + ** the page is marked as needSync==0. + */ + pPg = pager_lookup(pPager, pgno); + PAGERTRACE4("PLAYBACK %d page %d hash(%08x)\n", + PAGERID(pPager), pgno, pager_datahash(pPager->pageSize, aData)); + if( pPager->state>=PAGER_EXCLUSIVE && (pPg==0 || pPg->needSync==0) ){ + i64 offset = (pgno-1)*(i64)pPager->pageSize; + rc = sqlite3OsWrite(pPager->fd, aData, pPager->pageSize, offset); + if( pPg ){ + makeClean(pPg); + } + } + if( pPg ){ + /* No page should ever be explicitly rolled back that is in use, except + ** for page 1 which is held in use in order to keep the lock on the + ** database active. However such a page may be rolled back as a result + ** of an internal error resulting in an automatic call to + ** sqlite3PagerRollback(). 
+ */ + void *pData; + /* assert( pPg->nRef==0 || pPg->pgno==1 ); */ + pData = PGHDR_TO_DATA(pPg); + memcpy(pData, aData, pPager->pageSize); + if( pPager->xReiniter ){ + pPager->xReiniter(pPg, pPager->pageSize); + } +#ifdef SQLITE_CHECK_PAGES + pPg->pageHash = pager_pagehash(pPg); +#endif + /* If this was page 1, then restore the value of Pager.dbFileVers. + ** Do this before any decoding. */ + if( pgno==1 ){ + memcpy(&pPager->dbFileVers, &((u8*)pData)[24],sizeof(pPager->dbFileVers)); + } + + /* Decode the page just read from disk */ + CODEC1(pPager, pData, pPg->pgno, 3); + } + return rc; +} + +/* +** Parameter zMaster is the name of a master journal file. A single journal +** file that referred to the master journal file has just been rolled back. +** This routine checks if it is possible to delete the master journal file, +** and does so if it is. +** +** Argument zMaster may point to Pager.pTmpSpace. So that buffer is not +** available for use within this function. +** +** +** The master journal file contains the names of all child journals. +** To tell if a master journal can be deleted, check to each of the +** children. If all children are either missing or do not refer to +** a different master journal, then this master journal can be deleted. +*/ +static int pager_delmaster(Pager *pPager, const char *zMaster){ + sqlite3_vfs *pVfs = pPager->pVfs; + int rc; + int master_open = 0; + sqlite3_file *pMaster; + sqlite3_file *pJournal; + char *zMasterJournal = 0; /* Contents of master journal file */ + i64 nMasterJournal; /* Size of master journal file */ + + /* Open the master journal file exclusively in case some other process + ** is running this routine also. Not that it makes too much difference. + */ + pMaster = (sqlite3_file *)sqlite3_malloc(pVfs->szOsFile * 2); + pJournal = (sqlite3_file *)(((u8 *)pMaster) + pVfs->szOsFile); + if( !pMaster ){ + rc = SQLITE_NOMEM; + }else{ + int flags = (SQLITE_OPEN_READONLY|SQLITE_OPEN_MASTER_JOURNAL); + rc = sqlite3OsOpen(pVfs, zMaster, pMaster, flags, 0); + } + if( rc!=SQLITE_OK ) goto delmaster_out; + master_open = 1; + + rc = sqlite3OsFileSize(pMaster, &nMasterJournal); + if( rc!=SQLITE_OK ) goto delmaster_out; + + if( nMasterJournal>0 ){ + char *zJournal; + char *zMasterPtr = 0; + int nMasterPtr = pPager->pVfs->mxPathname+1; + + /* Load the entire master journal file into space obtained from + ** sqlite3_malloc() and pointed to by zMasterJournal. + */ + zMasterJournal = (char *)sqlite3_malloc(nMasterJournal + nMasterPtr); + if( !zMasterJournal ){ + rc = SQLITE_NOMEM; + goto delmaster_out; + } + zMasterPtr = &zMasterJournal[nMasterJournal]; + rc = sqlite3OsRead(pMaster, zMasterJournal, nMasterJournal, 0); + if( rc!=SQLITE_OK ) goto delmaster_out; + + zJournal = zMasterJournal; + while( (zJournal-zMasterJournal)state>=PAGER_EXCLUSIVE && pPager->fd->pMethods ){ + rc = sqlite3OsTruncate(pPager->fd, pPager->pageSize*(i64)nPage); + } + if( rc==SQLITE_OK ){ + pPager->dbSize = nPage; + pager_truncate_cache(pPager); + } + return rc; +} + +/* +** Set the sectorSize for the given pager. +** +** The sector size is the larger of the sector size reported +** by sqlite3OsSectorSize() and the pageSize. +*/ +static void setSectorSize(Pager *pPager){ + assert(pPager->fd->pMethods||pPager->tempFile); + if( !pPager->tempFile ){ + /* Sector size doesn't matter for temporary files. Also, the file + ** may not have been opened yet, in whcih case the OsSectorSize() + ** call will segfault. 
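+ **
+ ** For example (a sketch): if the VFS reports 512-byte sectors but the
+ ** page size is 1024, Pager.sectorSize is raised to 1024 below, so every
+ ** journal header is padded out to a full 1024 bytes.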
+ */ + pPager->sectorSize = sqlite3OsSectorSize(pPager->fd); + } + if( pPager->sectorSizepageSize ){ + pPager->sectorSize = pPager->pageSize; + } +} + +/* +** Playback the journal and thus restore the database file to +** the state it was in before we started making changes. +** +** The journal file format is as follows: +** +** (1) 8 byte prefix. A copy of aJournalMagic[]. +** (2) 4 byte big-endian integer which is the number of valid page records +** in the journal. If this value is 0xffffffff, then compute the +** number of page records from the journal size. +** (3) 4 byte big-endian integer which is the initial value for the +** sanity checksum. +** (4) 4 byte integer which is the number of pages to truncate the +** database to during a rollback. +** (5) 4 byte integer which is the number of bytes in the master journal +** name. The value may be zero (indicate that there is no master +** journal.) +** (6) N bytes of the master journal name. The name will be nul-terminated +** and might be shorter than the value read from (5). If the first byte +** of the name is \000 then there is no master journal. The master +** journal name is stored in UTF-8. +** (7) Zero or more pages instances, each as follows: +** + 4 byte page number. +** + pPager->pageSize bytes of data. +** + 4 byte checksum +** +** When we speak of the journal header, we mean the first 6 items above. +** Each entry in the journal is an instance of the 7th item. +** +** Call the value from the second bullet "nRec". nRec is the number of +** valid page entries in the journal. In most cases, you can compute the +** value of nRec from the size of the journal file. But if a power +** failure occurred while the journal was being written, it could be the +** case that the size of the journal file had already been increased but +** the extra entries had not yet made it safely to disk. In such a case, +** the value of nRec computed from the file size would be too large. For +** that reason, we always use the nRec value in the header. +** +** If the nRec value is 0xffffffff it means that nRec should be computed +** from the file size. This value is used when the user selects the +** no-sync option for the journal. A power failure could lead to corruption +** in this case. But for things like temporary table (which will be +** deleted when the power is restored) we don't care. +** +** If the file opened as the journal file is not a well-formed +** journal file then all pages up to the first corrupted page are rolled +** back (or no pages if the journal header is corrupted). The journal file +** is then deleted and SQLITE_OK returned, just as if no corruption had +** been encountered. +** +** If an I/O or malloc() error occurs, the journal-file is not deleted +** and an error code is returned. +*/ +static int pager_playback(Pager *pPager, int isHot){ + sqlite3_vfs *pVfs = pPager->pVfs; + i64 szJ; /* Size of the journal file in bytes */ + u32 nRec; /* Number of Records in the journal */ + int i; /* Loop counter */ + Pgno mxPg = 0; /* Size of the original file in pages */ + int rc; /* Result code of a subroutine */ + char *zMaster = 0; /* Name of master journal file if any */ + + /* Figure out how many records are in the journal. Abort early if + ** the journal is empty. + */ + assert( pPager->journalOpen ); + rc = sqlite3OsFileSize(pPager->jfd, &szJ); + if( rc!=SQLITE_OK || szJ==0 ){ + goto end_playback; + } + + /* Read the master journal name from the journal, if it is present. 
+ ** If a master journal file name is specified, but the file is not + ** present on disk, then the journal is not hot and does not need to be + ** played back. + */ + zMaster = pPager->pTmpSpace; + rc = readMasterJournal(pPager->jfd, zMaster, pPager->pVfs->mxPathname+1); + assert( rc!=SQLITE_DONE ); + if( rc!=SQLITE_OK + || (zMaster[0] && !sqlite3OsAccess(pVfs, zMaster, SQLITE_ACCESS_EXISTS)) + ){ + zMaster = 0; + if( rc==SQLITE_DONE ) rc = SQLITE_OK; + goto end_playback; + } + pPager->journalOff = 0; + zMaster = 0; + + /* This loop terminates either when the readJournalHdr() call returns + ** SQLITE_DONE or an IO error occurs. */ + while( 1 ){ + + /* Read the next journal header from the journal file. If there are + ** not enough bytes left in the journal file for a complete header, or + ** it is corrupted, then a process must of failed while writing it. + ** This indicates nothing more needs to be rolled back. + */ + rc = readJournalHdr(pPager, szJ, &nRec, &mxPg); + if( rc!=SQLITE_OK ){ + if( rc==SQLITE_DONE ){ + rc = SQLITE_OK; + } + goto end_playback; + } + + /* If nRec is 0xffffffff, then this journal was created by a process + ** working in no-sync mode. This means that the rest of the journal + ** file consists of pages, there are no more journal headers. Compute + ** the value of nRec based on this assumption. + */ + if( nRec==0xffffffff ){ + assert( pPager->journalOff==JOURNAL_HDR_SZ(pPager) ); + nRec = (szJ - JOURNAL_HDR_SZ(pPager))/JOURNAL_PG_SZ(pPager); + } + + /* If nRec is 0 and this rollback is of a transaction created by this + ** process and if this is the final header in the journal, then it means + ** that this part of the journal was being filled but has not yet been + ** synced to disk. Compute the number of pages based on the remaining + ** size of the file. + ** + ** The third term of the test was added to fix ticket #2565. + */ + if( nRec==0 && !isHot && + pPager->journalHdr+JOURNAL_HDR_SZ(pPager)==pPager->journalOff ){ + nRec = (szJ - pPager->journalOff) / JOURNAL_PG_SZ(pPager); + } + + /* If this is the first header read from the journal, truncate the + ** database file back to it's original size. + */ + if( pPager->journalOff==JOURNAL_HDR_SZ(pPager) ){ + rc = pager_truncate(pPager, mxPg); + if( rc!=SQLITE_OK ){ + goto end_playback; + } + } + + /* Copy original pages out of the journal and back into the database file. + */ + for(i=0; ijfd, pPager->journalOff, 1); + if( rc!=SQLITE_OK ){ + if( rc==SQLITE_DONE ){ + rc = SQLITE_OK; + pPager->journalOff = szJ; + break; + }else{ + goto end_playback; + } + } + } + } + /*NOTREACHED*/ + assert( 0 ); + +end_playback: + if( rc==SQLITE_OK ){ + zMaster = pPager->pTmpSpace; + rc = readMasterJournal(pPager->jfd, zMaster, pPager->pVfs->mxPathname+1); + } + if( rc==SQLITE_OK ){ + rc = pager_end_transaction(pPager); + } + if( rc==SQLITE_OK && zMaster[0] ){ + /* If there was a master journal and this routine will return success, + ** see if it is possible to delete the master journal. + */ + rc = pager_delmaster(pPager, zMaster); + } + + /* The Pager.sectorSize variable may have been updated while rolling + ** back a journal created by a process with a different sector size + ** value. Reset it to the correct value for this process. + */ + setSectorSize(pPager); + return rc; +} + +/* +** Playback the statement journal. +** +** This is similar to playing back the transaction journal but with +** a few extra twists. 
+** +** (1) The number of pages in the database file at the start of +** the statement is stored in pPager->stmtSize, not in the +** journal file itself. +** +** (2) In addition to playing back the statement journal, also +** playback all pages of the transaction journal beginning +** at offset pPager->stmtJSize. +*/ +static int pager_stmt_playback(Pager *pPager){ + i64 szJ; /* Size of the full journal */ + i64 hdrOff; + int nRec; /* Number of Records */ + int i; /* Loop counter */ + int rc; + + szJ = pPager->journalOff; +#ifndef NDEBUG + { + i64 os_szJ; + rc = sqlite3OsFileSize(pPager->jfd, &os_szJ); + if( rc!=SQLITE_OK ) return rc; + assert( szJ==os_szJ ); + } +#endif + + /* Set hdrOff to be the offset just after the end of the last journal + ** page written before the first journal-header for this statement + ** transaction was written, or the end of the file if no journal + ** header was written. + */ + hdrOff = pPager->stmtHdrOff; + assert( pPager->fullSync || !hdrOff ); + if( !hdrOff ){ + hdrOff = szJ; + } + + /* Truncate the database back to its original size. + */ + rc = pager_truncate(pPager, pPager->stmtSize); + assert( pPager->state>=PAGER_SHARED ); + + /* Figure out how many records are in the statement journal. + */ + assert( pPager->stmtInUse && pPager->journalOpen ); + nRec = pPager->stmtNRec; + + /* Copy original pages out of the statement journal and back into the + ** database file. Note that the statement journal omits checksums from + ** each record since power-failure recovery is not important to statement + ** journals. + */ + for(i=0; ipageSize); + rc = pager_playback_one_page(pPager, pPager->stfd, offset, 0); + assert( rc!=SQLITE_DONE ); + if( rc!=SQLITE_OK ) goto end_stmt_playback; + } + + /* Now roll some pages back from the transaction journal. Pager.stmtJSize + ** was the size of the journal file when this statement was started, so + ** everything after that needs to be rolled back, either into the + ** database, the memory cache, or both. + ** + ** If it is not zero, then Pager.stmtHdrOff is the offset to the start + ** of the first journal header written during this statement transaction. + */ + pPager->journalOff = pPager->stmtJSize; + pPager->cksumInit = pPager->stmtCksum; + while( pPager->journalOff < hdrOff ){ + rc = pager_playback_one_page(pPager, pPager->jfd, pPager->journalOff, 1); + assert( rc!=SQLITE_DONE ); + if( rc!=SQLITE_OK ) goto end_stmt_playback; + } + + while( pPager->journalOff < szJ ){ + u32 nJRec; /* Number of Journal Records */ + u32 dummy; + rc = readJournalHdr(pPager, szJ, &nJRec, &dummy); + if( rc!=SQLITE_OK ){ + assert( rc!=SQLITE_DONE ); + goto end_stmt_playback; + } + if( nJRec==0 ){ + nJRec = (szJ - pPager->journalOff) / (pPager->pageSize+8); + } + for(i=nJRec-1; i>=0 && pPager->journalOff < szJ; i--){ + rc = pager_playback_one_page(pPager, pPager->jfd, pPager->journalOff, 1); + assert( rc!=SQLITE_DONE ); + if( rc!=SQLITE_OK ) goto end_stmt_playback; + } + } + + pPager->journalOff = szJ; + +end_stmt_playback: + if( rc==SQLITE_OK) { + pPager->journalOff = szJ; + /* pager_reload_cache(pPager); */ + } + return rc; +} + +/* +** Change the maximum number of in-memory pages that are allowed. +*/ +void sqlite3PagerSetCachesize(Pager *pPager, int mxPage){ + if( mxPage>10 ){ + pPager->mxPage = mxPage; + }else{ + pPager->mxPage = 10; + } +} + +/* +** Adjust the robustness of the database to damage due to OS crashes +** or power failures by changing the number of syncs()s when writing +** the rollback journal. 
There are three levels: +** +** OFF sqlite3OsSync() is never called. This is the default +** for temporary and transient files. +** +** NORMAL The journal is synced once before writes begin on the +** database. This is normally adequate protection, but +** it is theoretically possible, though very unlikely, +** that an inopertune power failure could leave the journal +** in a state which would cause damage to the database +** when it is rolled back. +** +** FULL The journal is synced twice before writes begin on the +** database (with some additional information - the nRec field +** of the journal header - being written in between the two +** syncs). If we assume that writing a +** single disk sector is atomic, then this mode provides +** assurance that the journal will not be corrupted to the +** point of causing damage to the database during rollback. +** +** Numeric values associated with these states are OFF==1, NORMAL=2, +** and FULL=3. +*/ +#ifndef SQLITE_OMIT_PAGER_PRAGMAS +void sqlite3PagerSetSafetyLevel(Pager *pPager, int level, int full_fsync){ + pPager->noSync = level==1 || pPager->tempFile; + pPager->fullSync = level==3 && !pPager->tempFile; + pPager->sync_flags = (full_fsync?SQLITE_SYNC_FULL:SQLITE_SYNC_NORMAL); + if( pPager->noSync ) pPager->needSync = 0; +} +#endif + +/* +** The following global variable is incremented whenever the library +** attempts to open a temporary file. This information is used for +** testing and analysis only. +*/ +#ifdef SQLITE_TEST +int sqlite3_opentemp_count = 0; +#endif + +/* +** Open a temporary file. +** +** Write the file descriptor into *fd. Return SQLITE_OK on success or some +** other error code if we fail. The OS will automatically delete the temporary +** file when it is closed. +*/ +static int sqlite3PagerOpentemp( + sqlite3_vfs *pVfs, /* The virtual file system layer */ + sqlite3_file *pFile, /* Write the file descriptor here */ + char *zFilename, /* Name of the file. Might be NULL */ + int vfsFlags /* Flags passed through to the VFS */ +){ + int rc; + assert( zFilename!=0 ); + +#ifdef SQLITE_TEST + sqlite3_opentemp_count++; /* Used for testing and analysis only */ +#endif + + vfsFlags |= SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | + SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE; + rc = sqlite3OsOpen(pVfs, zFilename, pFile, vfsFlags, 0); + assert( rc!=SQLITE_OK || pFile->pMethods ); + return rc; +} + +/* +** Create a new page cache and put a pointer to the page cache in *ppPager. +** The file to be cached need not exist. The file is not locked until +** the first call to sqlite3PagerGet() and is only held open until the +** last page is released using sqlite3PagerUnref(). +** +** If zFilename is NULL then a randomly-named temporary file is created +** and used as the file to be cached. The file will be deleted +** automatically when it is closed. +** +** If zFilename is ":memory:" then all information is held in cache. +** It is never written to disk. This can be used to implement an +** in-memory database. 
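+**
+** A minimal usage sketch (hypothetical caller; the vfsFlags combination
+** shown is an assumption - real callers pass through the flags they were
+** given, e.g. by sqlite3_open_v2()):
+**
+**    Pager *pPager = 0;
+**    int rc = sqlite3PagerOpen(pVfs, &pPager, "test.db", 0, 0,
+**               SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_MAIN_DB);
+**    if( rc==SQLITE_OK ){
+**      ... use the pager ...
+**      sqlite3PagerClose(pPager);
+**    }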
+*/ +int sqlite3PagerOpen( + sqlite3_vfs *pVfs, /* The virtual file system to use */ + Pager **ppPager, /* Return the Pager structure here */ + const char *zFilename, /* Name of the database file to open */ + int nExtra, /* Extra bytes append to each in-memory page */ + int flags, /* flags controlling this file */ + int vfsFlags /* flags passed through to sqlite3_vfs.xOpen() */ +){ + u8 *pPtr; + Pager *pPager = 0; + int rc = SQLITE_OK; + int i; + int tempFile = 0; + int memDb = 0; + int readOnly = 0; + int useJournal = (flags & PAGER_OMIT_JOURNAL)==0; + int noReadlock = (flags & PAGER_NO_READLOCK)!=0; + int journalFileSize = sqlite3JournalSize(pVfs); + int nDefaultPage = SQLITE_DEFAULT_PAGE_SIZE; + char *zPathname; + int nPathname; + + /* The default return is a NULL pointer */ + *ppPager = 0; + + /* Compute the full pathname */ + nPathname = pVfs->mxPathname+1; + zPathname = sqlite3_malloc(nPathname); + if( zPathname==0 ){ + return SQLITE_NOMEM; + } + if( zFilename && zFilename[0] ){ +#ifndef SQLITE_OMIT_MEMORYDB + if( strcmp(zFilename,":memory:")==0 ){ + memDb = 1; + zPathname[0] = 0; + }else +#endif + { + rc = sqlite3OsFullPathname(pVfs, zFilename, nPathname, zPathname); + } + }else{ + rc = sqlite3OsGetTempname(pVfs, nPathname, zPathname); + } + if( rc!=SQLITE_OK ){ + sqlite3_free(zPathname); + return rc; + } + nPathname = strlen(zPathname); + + /* Allocate memory for the pager structure */ + pPager = sqlite3MallocZero( + sizeof(*pPager) + /* Pager structure */ + journalFileSize + /* The journal file structure */ + pVfs->szOsFile * 2 + /* The db and stmt journal files */ + 4*nPathname + 40 /* zFilename, zDirectory, zJournal, zStmtJrnl */ + ); + if( !pPager ){ + sqlite3_free(zPathname); + return SQLITE_NOMEM; + } + pPtr = (u8 *)&pPager[1]; + pPager->vfsFlags = vfsFlags; + pPager->fd = (sqlite3_file*)&pPtr[pVfs->szOsFile*0]; + pPager->stfd = (sqlite3_file*)&pPtr[pVfs->szOsFile*1]; + pPager->jfd = (sqlite3_file*)&pPtr[pVfs->szOsFile*2]; + pPager->zFilename = (char*)&pPtr[pVfs->szOsFile*2+journalFileSize]; + pPager->zDirectory = &pPager->zFilename[nPathname+1]; + pPager->zJournal = &pPager->zDirectory[nPathname+1]; + pPager->zStmtJrnl = &pPager->zJournal[nPathname+10]; + pPager->pVfs = pVfs; + memcpy(pPager->zFilename, zPathname, nPathname+1); + sqlite3_free(zPathname); + + /* Open the pager file. + */ + if( zFilename && zFilename[0] && !memDb ){ + if( nPathname>(pVfs->mxPathname - sizeof("-journal")) ){ + rc = SQLITE_CANTOPEN; + }else{ + int fout = 0; + rc = sqlite3OsOpen(pVfs, pPager->zFilename, pPager->fd, + pPager->vfsFlags, &fout); + readOnly = (fout&SQLITE_OPEN_READONLY); + + /* If the file was successfully opened for read/write access, + ** choose a default page size in case we have to create the + ** database file. The default page size is the maximum of: + ** + ** + SQLITE_DEFAULT_PAGE_SIZE, + ** + The value returned by sqlite3OsSectorSize() + ** + The largest page size that can be written atomically. 
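A minimal standalone sketch of the three-way maximum described in the list above, assuming the SQLITE_IOCAP_ATOMICnnn flags follow the (size>>8) encoding that the asserts in the next hunk rely on; the helper name defaultPageSize() and the numeric values shown for the defaults and for SQLITE_IOCAP_ATOMIC are illustrative, not taken from the patch.

#define SQLITE_DEFAULT_PAGE_SIZE      1024   /* illustrative defaults      */
#define SQLITE_MAX_DEFAULT_PAGE_SIZE  8192
#define SQLITE_IOCAP_ATOMIC           0x0001 /* placeholder value          */

/* Pick the default page size: start from the compile-time default,
** raise it to the device sector size, then to the largest size the
** device reports it can write atomically, and finally clamp it to
** SQLITE_MAX_DEFAULT_PAGE_SIZE. */
static int defaultPageSize(int sectorSize, int iDc){
  int nDefault = SQLITE_DEFAULT_PAGE_SIZE;
  int ii;
  if( nDefault<sectorSize ) nDefault = sectorSize;
  for(ii=nDefault; ii<=SQLITE_MAX_DEFAULT_PAGE_SIZE; ii=ii*2){
    if( iDc & (SQLITE_IOCAP_ATOMIC|(ii>>8)) ) nDefault = ii;
  }
  if( nDefault>SQLITE_MAX_DEFAULT_PAGE_SIZE ){
    nDefault = SQLITE_MAX_DEFAULT_PAGE_SIZE;
  }
  return nDefault;
}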
+ */ + if( rc==SQLITE_OK && !readOnly ){ + int iSectorSize = sqlite3OsSectorSize(pPager->fd); + if( nDefaultPagefd); + int ii; + assert(SQLITE_IOCAP_ATOMIC512==(512>>8)); + assert(SQLITE_IOCAP_ATOMIC64K==(65536>>8)); + assert(SQLITE_MAX_DEFAULT_PAGE_SIZE<=65536); + for(ii=nDefaultPage; ii<=SQLITE_MAX_DEFAULT_PAGE_SIZE; ii=ii*2){ + if( iDc&(SQLITE_IOCAP_ATOMIC|(ii>>8)) ) nDefaultPage = ii; + } + } +#endif + if( nDefaultPage>SQLITE_MAX_DEFAULT_PAGE_SIZE ){ + nDefaultPage = SQLITE_MAX_DEFAULT_PAGE_SIZE; + } + } + } + }else if( !memDb ){ + /* If a temporary file is requested, it is not opened immediately. + ** In this case we accept the default page size and delay actually + ** opening the file until the first call to OsWrite(). + */ + tempFile = 1; + pPager->state = PAGER_EXCLUSIVE; + } + + if( pPager && rc==SQLITE_OK ){ + pPager->pTmpSpace = (char *)sqlite3_malloc(nDefaultPage); + } + + /* If an error occured in either of the blocks above. + ** Free the Pager structure and close the file. + ** Since the pager is not allocated there is no need to set + ** any Pager.errMask variables. + */ + if( !pPager || !pPager->pTmpSpace ){ + sqlite3OsClose(pPager->fd); + sqlite3_free(pPager); + return ((rc==SQLITE_OK)?SQLITE_NOMEM:rc); + } + + PAGERTRACE3("OPEN %d %s\n", FILEHANDLEID(pPager->fd), pPager->zFilename); + IOTRACE(("OPEN %p %s\n", pPager, pPager->zFilename)) + + /* Fill in Pager.zDirectory[] */ + memcpy(pPager->zDirectory, pPager->zFilename, nPathname+1); + for(i=strlen(pPager->zDirectory); i>0 && pPager->zDirectory[i-1]!='/'; i--){} + if( i>0 ) pPager->zDirectory[i-1] = 0; + + /* Fill in Pager.zJournal[] and Pager.zStmtJrnl[] */ + memcpy(pPager->zJournal, pPager->zFilename, nPathname); + memcpy(&pPager->zJournal[nPathname], "-journal", 9); + memcpy(pPager->zStmtJrnl, pPager->zFilename, nPathname); + memcpy(&pPager->zStmtJrnl[nPathname], "-stmtjrnl", 10); + + /* pPager->journalOpen = 0; */ + pPager->useJournal = useJournal && !memDb; + pPager->noReadlock = noReadlock && readOnly; + /* pPager->stmtOpen = 0; */ + /* pPager->stmtInUse = 0; */ + /* pPager->nRef = 0; */ + pPager->dbSize = memDb-1; + pPager->pageSize = nDefaultPage; + /* pPager->stmtSize = 0; */ + /* pPager->stmtJSize = 0; */ + /* pPager->nPage = 0; */ + pPager->mxPage = 100; + pPager->mxPgno = SQLITE_MAX_PAGE_COUNT; + /* pPager->state = PAGER_UNLOCK; */ + assert( pPager->state == (tempFile ? 
PAGER_EXCLUSIVE : PAGER_UNLOCK) ); + /* pPager->errMask = 0; */ + pPager->tempFile = tempFile; + assert( tempFile==PAGER_LOCKINGMODE_NORMAL + || tempFile==PAGER_LOCKINGMODE_EXCLUSIVE ); + assert( PAGER_LOCKINGMODE_EXCLUSIVE==1 ); + pPager->exclusiveMode = tempFile; + pPager->memDb = memDb; + pPager->readOnly = readOnly; + /* pPager->needSync = 0; */ + pPager->noSync = pPager->tempFile || !useJournal; + pPager->fullSync = (pPager->noSync?0:1); + pPager->sync_flags = SQLITE_SYNC_NORMAL; + /* pPager->pFirst = 0; */ + /* pPager->pFirstSynced = 0; */ + /* pPager->pLast = 0; */ + pPager->nExtra = FORCE_ALIGNMENT(nExtra); + assert(pPager->fd->pMethods||memDb||tempFile); + if( !memDb ){ + setSectorSize(pPager); + } + /* pPager->pBusyHandler = 0; */ + /* memset(pPager->aHash, 0, sizeof(pPager->aHash)); */ + *ppPager = pPager; +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT + pPager->iInUseMM = 0; + pPager->iInUseDB = 0; + if( !memDb ){ + sqlite3_mutex *mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MEM2); + sqlite3_mutex_enter(mutex); + pPager->pNext = sqlite3PagerList; + if( sqlite3PagerList ){ + assert( sqlite3PagerList->pPrev==0 ); + sqlite3PagerList->pPrev = pPager; + } + pPager->pPrev = 0; + sqlite3PagerList = pPager; + sqlite3_mutex_leave(mutex); + } +#endif + return SQLITE_OK; +} + +/* +** Set the busy handler function. +*/ +void sqlite3PagerSetBusyhandler(Pager *pPager, BusyHandler *pBusyHandler){ + pPager->pBusyHandler = pBusyHandler; +} + +/* +** Set the destructor for this pager. If not NULL, the destructor is called +** when the reference count on each page reaches zero. The destructor can +** be used to clean up information in the extra segment appended to each page. +** +** The destructor is not called as a result sqlite3PagerClose(). +** Destructors are only called by sqlite3PagerUnref(). +*/ +void sqlite3PagerSetDestructor(Pager *pPager, void (*xDesc)(DbPage*,int)){ + pPager->xDestructor = xDesc; +} + +/* +** Set the reinitializer for this pager. If not NULL, the reinitializer +** is called when the content of a page in cache is restored to its original +** value as a result of a rollback. The callback gives higher-level code +** an opportunity to restore the EXTRA section to agree with the restored +** page data. +*/ +void sqlite3PagerSetReiniter(Pager *pPager, void (*xReinit)(DbPage*,int)){ + pPager->xReiniter = xReinit; +} + +/* +** Set the page size to *pPageSize. If the suggest new page size is +** inappropriate, then an alternative page size is set to that +** value before returning. +*/ +int sqlite3PagerSetPagesize(Pager *pPager, u16 *pPageSize){ + int rc = SQLITE_OK; + u16 pageSize = *pPageSize; + assert( pageSize==0 || (pageSize>=512 && pageSize<=SQLITE_MAX_PAGE_SIZE) ); + if( pageSize && pageSize!=pPager->pageSize + && !pPager->memDb && pPager->nRef==0 + ){ + char *pNew = (char *)sqlite3_malloc(pageSize); + if( !pNew ){ + rc = SQLITE_NOMEM; + }else{ + pagerEnter(pPager); + pager_reset(pPager); + pPager->pageSize = pageSize; + setSectorSize(pPager); + sqlite3_free(pPager->pTmpSpace); + pPager->pTmpSpace = pNew; + pagerLeave(pPager); + } + } + *pPageSize = pPager->pageSize; + return rc; +} + +/* +** Attempt to set the maximum database page count if mxPage is positive. +** Make no changes if mxPage is zero or negative. And never reduce the +** maximum page count below the current size of the database. +** +** Regardless of mxPage, return the current maximum page count. 
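The rule spelled out above fits in a few lines; the sketch below is a standalone model, not the pager's code, and maxPageCount(), pMxPgno and nPageInDb are invented names.

/* A positive request replaces the limit; zero or negative only queries.
** Either way the limit is never left below the current database size,
** and the (possibly adjusted) limit is returned. */
static int maxPageCount(int *pMxPgno, int mxPageRequested, int nPageInDb){
  if( mxPageRequested>0 ){
    *pMxPgno = mxPageRequested;
  }
  if( nPageInDb>*pMxPgno ){
    *pMxPgno = nPageInDb;
  }
  return *pMxPgno;
}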
+*/ +int sqlite3PagerMaxPageCount(Pager *pPager, int mxPage){ + if( mxPage>0 ){ + pPager->mxPgno = mxPage; + } + sqlite3PagerPagecount(pPager); + return pPager->mxPgno; +} + +/* +** The following set of routines are used to disable the simulated +** I/O error mechanism. These routines are used to avoid simulated +** errors in places where we do not care about errors. +** +** Unless -DSQLITE_TEST=1 is used, these routines are all no-ops +** and generate no code. +*/ +#ifdef SQLITE_TEST +extern int sqlite3_io_error_pending; +extern int sqlite3_io_error_hit; +static int saved_cnt; +void disable_simulated_io_errors(void){ + saved_cnt = sqlite3_io_error_pending; + sqlite3_io_error_pending = -1; +} +void enable_simulated_io_errors(void){ + sqlite3_io_error_pending = saved_cnt; +} +#else +# define disable_simulated_io_errors() +# define enable_simulated_io_errors() +#endif + +/* +** Read the first N bytes from the beginning of the file into memory +** that pDest points to. +** +** No error checking is done. The rational for this is that this function +** may be called even if the file does not exist or contain a header. In +** these cases sqlite3OsRead() will return an error, to which the correct +** response is to zero the memory at pDest and continue. A real IO error +** will presumably recur and be picked up later (Todo: Think about this). +*/ +int sqlite3PagerReadFileheader(Pager *pPager, int N, unsigned char *pDest){ + int rc = SQLITE_OK; + memset(pDest, 0, N); + assert(MEMDB||pPager->fd->pMethods||pPager->tempFile); + if( pPager->fd->pMethods ){ + IOTRACE(("DBHDR %p 0 %d\n", pPager, N)) + rc = sqlite3OsRead(pPager->fd, pDest, N, 0); + if( rc==SQLITE_IOERR_SHORT_READ ){ + rc = SQLITE_OK; + } + } + return rc; +} + +/* +** Return the total number of pages in the disk file associated with +** pPager. +** +** If the PENDING_BYTE lies on the page directly after the end of the +** file, then consider this page part of the file too. For example, if +** PENDING_BYTE is byte 4096 (the first byte of page 5) and the size of the +** file is 4096 bytes, 5 is returned instead of 4. +*/ +int sqlite3PagerPagecount(Pager *pPager){ + i64 n = 0; + int rc; + assert( pPager!=0 ); + if( pPager->errCode ){ + return 0; + } + if( pPager->dbSize>=0 ){ + n = pPager->dbSize; + } else { + assert(pPager->fd->pMethods||pPager->tempFile); + if( (pPager->fd->pMethods) + && (rc = sqlite3OsFileSize(pPager->fd, &n))!=SQLITE_OK ){ + pPager->nRef++; + pager_error(pPager, rc); + pPager->nRef--; + return 0; + } + if( n>0 && npageSize ){ + n = 1; + }else{ + n /= pPager->pageSize; + } + if( pPager->state!=PAGER_UNLOCK ){ + pPager->dbSize = n; + } + } + if( n==(PENDING_BYTE/pPager->pageSize) ){ + n++; + } + if( n>pPager->mxPgno ){ + pPager->mxPgno = n; + } + return n; +} + + +#ifndef SQLITE_OMIT_MEMORYDB +/* +** Clear a PgHistory block +*/ +static void clearHistory(PgHistory *pHist){ + sqlite3_free(pHist->pOrig); + sqlite3_free(pHist->pStmt); + pHist->pOrig = 0; + pHist->pStmt = 0; +} +#else +#define clearHistory(x) +#endif + +/* +** Forward declaration +*/ +static int syncJournal(Pager*); + +/* +** Unlink pPg from it's hash chain. Also set the page number to 0 to indicate +** that the page is not part of any hash chain. This is required because the +** sqlite3PagerMovepage() routine can leave a page in the +** pNextFree/pPrevFree list that is not a part of any hash-chain. 
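Before the cache plumbing that follows, the size arithmetic in sqlite3PagerPagecount() above deserves a worked example. This is a minimal standalone model; pageCountFromSize() and the pendingByte parameter are invented for illustration (in the library the lock-page offset is the PENDING_BYTE macro).

#include <stdio.h>

/* A non-empty file smaller than one page counts as one page; otherwise
** the count rounds down; and when the lock page lies directly after the
** end of the file it is counted as part of the file as well. */
static long long pageCountFromSize(long long szFile, int pageSize,
                                   long long pendingByte){
  long long n;
  if( szFile>0 && szFile<pageSize ){
    n = 1;
  }else{
    n = szFile/pageSize;
  }
  if( n==(pendingByte/pageSize) ){
    n++;
  }
  return n;
}

int main(void){
  /* The example from the comment: 1024-byte pages, lock byte at offset
  ** 4096, file size 4096 bytes => 5 pages are reported, not 4. */
  printf("%lld\n", pageCountFromSize(4096, 1024, 4096));
  return 0;
}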
+*/ +static void unlinkHashChain(Pager *pPager, PgHdr *pPg){ + if( pPg->pgno==0 ){ + assert( pPg->pNextHash==0 && pPg->pPrevHash==0 ); + return; + } + if( pPg->pNextHash ){ + pPg->pNextHash->pPrevHash = pPg->pPrevHash; + } + if( pPg->pPrevHash ){ + assert( pPager->aHash[pPg->pgno & (pPager->nHash-1)]!=pPg ); + pPg->pPrevHash->pNextHash = pPg->pNextHash; + }else{ + int h = pPg->pgno & (pPager->nHash-1); + pPager->aHash[h] = pPg->pNextHash; + } + if( MEMDB ){ + clearHistory(PGHDR_TO_HIST(pPg, pPager)); + } + pPg->pgno = 0; + pPg->pNextHash = pPg->pPrevHash = 0; +} + +/* +** Unlink a page from the free list (the list of all pages where nRef==0) +** and from its hash collision chain. +*/ +static void unlinkPage(PgHdr *pPg){ + Pager *pPager = pPg->pPager; + + /* Unlink from free page list */ + lruListRemove(pPg); + + /* Unlink from the pgno hash table */ + unlinkHashChain(pPager, pPg); +} + +/* +** This routine is used to truncate the cache when a database +** is truncated. Drop from the cache all pages whose pgno is +** larger than pPager->dbSize and is unreferenced. +** +** Referenced pages larger than pPager->dbSize are zeroed. +** +** Actually, at the point this routine is called, it would be +** an error to have a referenced page. But rather than delete +** that page and guarantee a subsequent segfault, it seems better +** to zero it and hope that we error out sanely. +*/ +static void pager_truncate_cache(Pager *pPager){ + PgHdr *pPg; + PgHdr **ppPg; + int dbSize = pPager->dbSize; + + ppPg = &pPager->pAll; + while( (pPg = *ppPg)!=0 ){ + if( pPg->pgno<=dbSize ){ + ppPg = &pPg->pNextAll; + }else if( pPg->nRef>0 ){ + memset(PGHDR_TO_DATA(pPg), 0, pPager->pageSize); + ppPg = &pPg->pNextAll; + }else{ + *ppPg = pPg->pNextAll; + IOTRACE(("PGFREE %p %d\n", pPager, pPg->pgno)); + PAGER_INCR(sqlite3_pager_pgfree_count); + unlinkPage(pPg); + makeClean(pPg); + sqlite3_free(pPg->pData); + sqlite3_free(pPg); + pPager->nPage--; + } + } +} + +/* +** Try to obtain a lock on a file. Invoke the busy callback if the lock +** is currently not available. Repeat until the busy callback returns +** false or until the lock succeeds. +** +** Return SQLITE_OK on success and an error code if we cannot obtain +** the lock. +*/ +static int pager_wait_on_lock(Pager *pPager, int locktype){ + int rc; + + /* The OS lock values must be the same as the Pager lock values */ + assert( PAGER_SHARED==SHARED_LOCK ); + assert( PAGER_RESERVED==RESERVED_LOCK ); + assert( PAGER_EXCLUSIVE==EXCLUSIVE_LOCK ); + + /* If the file is currently unlocked then the size must be unknown */ + assert( pPager->state>=PAGER_SHARED || pPager->dbSize<0 || MEMDB ); + + if( pPager->state>=locktype ){ + rc = SQLITE_OK; + }else{ + do { + rc = sqlite3OsLock(pPager->fd, locktype); + }while( rc==SQLITE_BUSY && sqlite3InvokeBusyHandler(pPager->pBusyHandler) ); + if( rc==SQLITE_OK ){ + pPager->state = locktype; + IOTRACE(("LOCK %p %d\n", pPager, locktype)) + } + } + return rc; +} + +/* +** Truncate the file to the number of pages specified. 
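The retry loop in pager_wait_on_lock() above is a pattern worth isolating: keep requesting the lock while the attempt reports busy and the busy handler still consents. The sketch below is a generic rendering with invented callback types (TryLockFn, BusyFn); it is not the SQLite busy-handler API.

typedef int (*TryLockFn)(void *pFile, int lockType);  /* 0 = got it, 1 = busy */
typedef int (*BusyFn)(void *pArg, int nBusy);         /* nonzero = retry      */

/* Returns 0 once the lock is obtained, or 1 if the busy handler gives up
** (or none is supplied). */
static int waitOnLock(TryLockFn xTryLock, void *pFile, int lockType,
                      BusyFn xBusy, void *pBusyArg){
  int rc, nBusy = 0;
  do{
    rc = xTryLock(pFile, lockType);
  }while( rc==1 && xBusy && xBusy(pBusyArg, nBusy++) );
  return rc;
}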
+*/ +int sqlite3PagerTruncate(Pager *pPager, Pgno nPage){ + int rc; + assert( pPager->state>=PAGER_SHARED || MEMDB ); + sqlite3PagerPagecount(pPager); + if( pPager->errCode ){ + rc = pPager->errCode; + return rc; + } + if( nPage>=(unsigned)pPager->dbSize ){ + return SQLITE_OK; + } + if( MEMDB ){ + pPager->dbSize = nPage; + pager_truncate_cache(pPager); + return SQLITE_OK; + } + pagerEnter(pPager); + rc = syncJournal(pPager); + pagerLeave(pPager); + if( rc!=SQLITE_OK ){ + return rc; + } + + /* Get an exclusive lock on the database before truncating. */ + pagerEnter(pPager); + rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK); + pagerLeave(pPager); + if( rc!=SQLITE_OK ){ + return rc; + } + + rc = pager_truncate(pPager, nPage); + return rc; +} + +/* +** Shutdown the page cache. Free all memory and close all files. +** +** If a transaction was in progress when this routine is called, that +** transaction is rolled back. All outstanding pages are invalidated +** and their memory is freed. Any attempt to use a page associated +** with this page cache after this function returns will likely +** result in a coredump. +** +** This function always succeeds. If a transaction is active an attempt +** is made to roll it back. If an error occurs during the rollback +** a hot journal may be left in the filesystem but no error is returned +** to the caller. +*/ +int sqlite3PagerClose(Pager *pPager){ +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT + if( !MEMDB ){ + sqlite3_mutex *mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MEM2); + sqlite3_mutex_enter(mutex); + if( pPager->pPrev ){ + pPager->pPrev->pNext = pPager->pNext; + }else{ + sqlite3PagerList = pPager->pNext; + } + if( pPager->pNext ){ + pPager->pNext->pPrev = pPager->pPrev; + } + sqlite3_mutex_leave(mutex); + } +#endif + + disable_simulated_io_errors(); + pPager->errCode = 0; + pPager->exclusiveMode = 0; + pager_reset(pPager); + pagerUnlockAndRollback(pPager); + enable_simulated_io_errors(); + PAGERTRACE2("CLOSE %d\n", PAGERID(pPager)); + IOTRACE(("CLOSE %p\n", pPager)) + assert( pPager->errCode || (pPager->journalOpen==0 && pPager->stmtOpen==0) ); + if( pPager->journalOpen ){ + sqlite3OsClose(pPager->jfd); + } + sqlite3_free(pPager->aInJournal); + if( pPager->stmtOpen ){ + sqlite3OsClose(pPager->stfd); + } + sqlite3OsClose(pPager->fd); + /* Temp files are automatically deleted by the OS + ** if( pPager->tempFile ){ + ** sqlite3OsDelete(pPager->zFilename); + ** } + */ + + sqlite3_free(pPager->aHash); + sqlite3_free(pPager->pTmpSpace); + sqlite3_free(pPager); + return SQLITE_OK; +} + +#if !defined(NDEBUG) || defined(SQLITE_TEST) +/* +** Return the page number for the given page data. +*/ +Pgno sqlite3PagerPagenumber(DbPage *p){ + return p->pgno; +} +#endif + +/* +** The page_ref() function increments the reference count for a page. +** If the page is currently on the freelist (the reference count is zero) then +** remove it from the freelist. +** +** For non-test systems, page_ref() is a macro that calls _page_ref() +** online of the reference count is zero. For test systems, page_ref() +** is a real function so that we can set breakpoints and trace it. +*/ +static void _page_ref(PgHdr *pPg){ + if( pPg->nRef==0 ){ + /* The page is currently on the freelist. Remove it. 
*/ + lruListRemove(pPg); + pPg->pPager->nRef++; + } + pPg->nRef++; + REFINFO(pPg); +} +#ifdef SQLITE_DEBUG + static void page_ref(PgHdr *pPg){ + if( pPg->nRef==0 ){ + _page_ref(pPg); + }else{ + pPg->nRef++; + REFINFO(pPg); + } + } +#else +# define page_ref(P) ((P)->nRef==0?_page_ref(P):(void)(P)->nRef++) +#endif + +/* +** Increment the reference count for a page. The input pointer is +** a reference to the page data. +*/ +int sqlite3PagerRef(DbPage *pPg){ + pagerEnter(pPg->pPager); + page_ref(pPg); + pagerLeave(pPg->pPager); + return SQLITE_OK; +} + +/* +** Sync the journal. In other words, make sure all the pages that have +** been written to the journal have actually reached the surface of the +** disk. It is not safe to modify the original database file until after +** the journal has been synced. If the original database is modified before +** the journal is synced and a power failure occurs, the unsynced journal +** data would be lost and we would be unable to completely rollback the +** database changes. Database corruption would occur. +** +** This routine also updates the nRec field in the header of the journal. +** (See comments on the pager_playback() routine for additional information.) +** If the sync mode is FULL, two syncs will occur. First the whole journal +** is synced, then the nRec field is updated, then a second sync occurs. +** +** For temporary databases, we do not care if we are able to rollback +** after a power failure, so no sync occurs. +** +** If the IOCAP_SEQUENTIAL flag is set for the persistent media on which +** the database is stored, then OsSync() is never called on the journal +** file. In this case all that is required is to update the nRec field in +** the journal header. +** +** This routine clears the needSync field of every page current held in +** memory. +*/ +static int syncJournal(Pager *pPager){ + PgHdr *pPg; + int rc = SQLITE_OK; + + + /* Sync the journal before modifying the main database + ** (assuming there is a journal and it needs to be synced.) + */ + if( pPager->needSync ){ + if( !pPager->tempFile ){ + int iDc = sqlite3OsDeviceCharacteristics(pPager->fd); + assert( pPager->journalOpen ); + + /* assert( !pPager->noSync ); // noSync might be set if synchronous + ** was turned off after the transaction was started. Ticket #615 */ +#ifndef NDEBUG + { + /* Make sure the pPager->nRec counter we are keeping agrees + ** with the nRec computed from the size of the journal file. + */ + i64 jSz; + rc = sqlite3OsFileSize(pPager->jfd, &jSz); + if( rc!=0 ) return rc; + assert( pPager->journalOff==jSz ); + } +#endif + if( 0==(iDc&SQLITE_IOCAP_SAFE_APPEND) ){ + /* Write the nRec value into the journal file header. If in + ** full-synchronous mode, sync the journal first. This ensures that + ** all data has really hit the disk before nRec is updated to mark + ** it as a candidate for rollback. + ** + ** This is not required if the persistent media supports the + ** SAFE_APPEND property. Because in this case it is not possible + ** for garbage data to be appended to the file, the nRec field + ** is populated with 0xFFFFFFFF when the journal header is written + ** and never needs to be updated. 
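The ordering constraints described above reduce to a small decision: patch the nRec field only when SAFE_APPEND is not available, sync before patching only in full-sync mode, and skip syncs entirely on sequential media. A minimal sketch of that control flow, with stand-in xSync()/xWriteNRec() callbacks and placeholder flag values rather than the library's own:

#define IOCAP_SAFE_APPEND 0x01   /* placeholder values, for illustration */
#define IOCAP_SEQUENTIAL  0x02

static int syncJournalSketch(int iDc, int fullSync,
                             int (*xSync)(void), int (*xWriteNRec)(void)){
  int rc = 0;
  if( (iDc & IOCAP_SAFE_APPEND)==0 ){
    if( fullSync && (iDc & IOCAP_SEQUENTIAL)==0 ){
      rc = xSync();         /* first sync: page images are on disk        */
      if( rc ) return rc;
    }
    rc = xWriteNRec();      /* only now is the record count made visible  */
    if( rc ) return rc;
  }
  if( (iDc & IOCAP_SEQUENTIAL)==0 ){
    rc = xSync();           /* second sync: the patched header is durable */
  }
  return rc;
}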
+ */ + i64 jrnlOff; + if( pPager->fullSync && 0==(iDc&SQLITE_IOCAP_SEQUENTIAL) ){ + PAGERTRACE2("SYNC journal of %d\n", PAGERID(pPager)); + IOTRACE(("JSYNC %p\n", pPager)) + rc = sqlite3OsSync(pPager->jfd, pPager->sync_flags); + if( rc!=0 ) return rc; + } + + jrnlOff = pPager->journalHdr + sizeof(aJournalMagic); + IOTRACE(("JHDR %p %lld %d\n", pPager, jrnlOff, 4)); + rc = write32bits(pPager->jfd, jrnlOff, pPager->nRec); + if( rc ) return rc; + } + if( 0==(iDc&SQLITE_IOCAP_SEQUENTIAL) ){ + PAGERTRACE2("SYNC journal of %d\n", PAGERID(pPager)); + IOTRACE(("JSYNC %p\n", pPager)) + rc = sqlite3OsSync(pPager->jfd, pPager->sync_flags| + (pPager->sync_flags==SQLITE_SYNC_FULL?SQLITE_SYNC_DATAONLY:0) + ); + if( rc!=0 ) return rc; + } + pPager->journalStarted = 1; + } + pPager->needSync = 0; + + /* Erase the needSync flag from every page. + */ + for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){ + pPg->needSync = 0; + } + lruListSetFirstSynced(pPager); + } + +#ifndef NDEBUG + /* If the Pager.needSync flag is clear then the PgHdr.needSync + ** flag must also be clear for all pages. Verify that this + ** invariant is true. + */ + else{ + for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){ + assert( pPg->needSync==0 ); + } + assert( pPager->lru.pFirstSynced==pPager->lru.pFirst ); + } +#endif + + return rc; +} + +/* +** Merge two lists of pages connected by pDirty and in pgno order. +** Do not both fixing the pPrevDirty pointers. +*/ +static PgHdr *merge_pagelist(PgHdr *pA, PgHdr *pB){ + PgHdr result, *pTail; + pTail = &result; + while( pA && pB ){ + if( pA->pgnopgno ){ + pTail->pDirty = pA; + pTail = pA; + pA = pA->pDirty; + }else{ + pTail->pDirty = pB; + pTail = pB; + pB = pB->pDirty; + } + } + if( pA ){ + pTail->pDirty = pA; + }else if( pB ){ + pTail->pDirty = pB; + }else{ + pTail->pDirty = 0; + } + return result.pDirty; +} + +/* +** Sort the list of pages in accending order by pgno. Pages are +** connected by pDirty pointers. The pPrevDirty pointers are +** corrupted by this sort. +*/ +#define N_SORT_BUCKET_ALLOC 25 +#define N_SORT_BUCKET 25 +#ifdef SQLITE_TEST + int sqlite3_pager_n_sort_bucket = 0; + #undef N_SORT_BUCKET + #define N_SORT_BUCKET \ + (sqlite3_pager_n_sort_bucket?sqlite3_pager_n_sort_bucket:N_SORT_BUCKET_ALLOC) +#endif +static PgHdr *sort_pagelist(PgHdr *pIn){ + PgHdr *a[N_SORT_BUCKET_ALLOC], *p; + int i; + memset(a, 0, sizeof(a)); + while( pIn ){ + p = pIn; + pIn = p->pDirty; + p->pDirty = 0; + for(i=0; ipPager; + + /* At this point there may be either a RESERVED or EXCLUSIVE lock on the + ** database file. If there is already an EXCLUSIVE lock, the following + ** calls to sqlite3OsLock() are no-ops. + ** + ** Moving the lock from RESERVED to EXCLUSIVE actually involves going + ** through an intermediate state PENDING. A PENDING lock prevents new + ** readers from attaching to the database but is unsufficient for us to + ** write. The idea of a PENDING lock is to prevent new readers from + ** coming in while we wait for existing readers to clear. + ** + ** While the pager is in the RESERVED state, the original database file + ** is unchanged and we can rollback without having to playback the + ** journal into the original database file. Once we transition to + ** EXCLUSIVE, it means the database file has been changed and any rollback + ** will require a journal playback. 
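Because the lock states form a strict ladder, "do we already hold at least this lock?" is a single comparison, which is why this routine and pager_wait_on_lock() can test pPager->state>=locktype. A tiny illustration; the numeric values shown are the conventional ordering and should be treated as an assumption here.

enum LockState {
  NO_LOCK = 0, SHARED_LOCK = 1, RESERVED_LOCK = 2,
  PENDING_LOCK = 3, EXCLUSIVE_LOCK = 4
};

static int holdsAtLeast(enum LockState have, enum LockState want){
  return have>=want;   /* e.g. EXCLUSIVE satisfies a request for RESERVED */
}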
+ */ + rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK); + if( rc!=SQLITE_OK ){ + return rc; + } + + pList = sort_pagelist(pList); + for(p=pList; p; p=p->pDirty){ + assert( p->dirty ); + p->dirty = 0; + } + while( pList ){ + + /* If the file has not yet been opened, open it now. */ + if( !pPager->fd->pMethods ){ + assert(pPager->tempFile); + rc = sqlite3PagerOpentemp(pPager->pVfs, pPager->fd, pPager->zFilename, + pPager->vfsFlags); + if( rc ) return rc; + } + + /* If there are dirty pages in the page cache with page numbers greater + ** than Pager.dbSize, this means sqlite3PagerTruncate() was called to + ** make the file smaller (presumably by auto-vacuum code). Do not write + ** any such pages to the file. + */ + if( pList->pgno<=pPager->dbSize ){ + i64 offset = (pList->pgno-1)*(i64)pPager->pageSize; + char *pData = CODEC2(pPager, PGHDR_TO_DATA(pList), pList->pgno, 6); + PAGERTRACE4("STORE %d page %d hash(%08x)\n", + PAGERID(pPager), pList->pgno, pager_pagehash(pList)); + IOTRACE(("PGOUT %p %d\n", pPager, pList->pgno)); + rc = sqlite3OsWrite(pPager->fd, pData, pPager->pageSize, offset); + PAGER_INCR(sqlite3_pager_writedb_count); + PAGER_INCR(pPager->nWrite); + if( pList->pgno==1 ){ + memcpy(&pPager->dbFileVers, &pData[24], sizeof(pPager->dbFileVers)); + } + } +#ifndef NDEBUG + else{ + PAGERTRACE3("NOSTORE %d page %d\n", PAGERID(pPager), pList->pgno); + } +#endif + if( rc ) return rc; +#ifdef SQLITE_CHECK_PAGES + pList->pageHash = pager_pagehash(pList); +#endif + pList = pList->pDirty; + } + return SQLITE_OK; +} + +/* +** Collect every dirty page into a dirty list and +** return a pointer to the head of that list. All pages are +** collected even if they are still in use. +*/ +static PgHdr *pager_get_all_dirty_pages(Pager *pPager){ + return pPager->pDirty; +} + +/* +** Return TRUE if there is a hot journal on the given pager. +** A hot journal is one that needs to be played back. +** +** If the current size of the database file is 0 but a journal file +** exists, that is probably an old journal left over from a prior +** database with the same name. Just delete the journal. +*/ +static int hasHotJournal(Pager *pPager){ + sqlite3_vfs *pVfs = pPager->pVfs; + if( !pPager->useJournal ) return 0; + if( !sqlite3OsAccess(pVfs, pPager->zJournal, SQLITE_ACCESS_EXISTS) ){ + return 0; + } + if( sqlite3OsCheckReservedLock(pPager->fd) ){ + return 0; + } + if( sqlite3PagerPagecount(pPager)==0 ){ + sqlite3OsDelete(pVfs, pPager->zJournal, 0); + return 0; + }else{ + return 1; + } +} + +/* +** Try to find a page in the cache that can be recycled. +** +** This routine may return SQLITE_IOERR, SQLITE_FULL or SQLITE_OK. It +** does not set the pPager->errCode variable. +*/ +static int pager_recycle(Pager *pPager, PgHdr **ppPg){ + PgHdr *pPg; + *ppPg = 0; + + /* It is illegal to call this function unless the pager object + ** pointed to by pPager has at least one free page (page with nRef==0). + */ + assert(!MEMDB); + assert(pPager->lru.pFirst); + + /* Find a page to recycle. Try to locate a page that does not + ** require us to do an fsync() on the journal. + */ + pPg = pPager->lru.pFirstSynced; + + /* If we could not find a page that does not require an fsync() + ** on the journal file then fsync the journal file. This is a + ** very slow operation, so we work hard to avoid it. But sometimes + ** it can't be helped. 
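For reference, the hot-journal test in hasHotJournal() above boils down to four conditions. The sketch below restates them with plain integer inputs instead of VFS calls; isHotJournal() and its parameter names are invented.

/* A journal is "hot" (needs playback) only when journalling is enabled,
** the journal file exists, no other process still holds a RESERVED lock
** on the database, and the database itself is non-empty.  (The real
** routine also deletes a leftover journal when the database is empty.) */
static int isHotJournal(int useJournal, int journalExists,
                        int reservedLockHeld, int nPageInDb){
  if( !useJournal )      return 0;
  if( !journalExists )   return 0;
  if( reservedLockHeld ) return 0;
  if( nPageInDb==0 )     return 0;
  return 1;
}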
+ */ + if( pPg==0 && pPager->lru.pFirst){ + int iDc = sqlite3OsDeviceCharacteristics(pPager->fd); + int rc = syncJournal(pPager); + if( rc!=0 ){ + return rc; + } + if( pPager->fullSync && 0==(iDc&SQLITE_IOCAP_SAFE_APPEND) ){ + /* If in full-sync mode, write a new journal header into the + ** journal file. This is done to avoid ever modifying a journal + ** header that is involved in the rollback of pages that have + ** already been written to the database (in case the header is + ** trashed when the nRec field is updated). + */ + pPager->nRec = 0; + assert( pPager->journalOff > 0 ); + assert( pPager->doNotSync==0 ); + rc = writeJournalHdr(pPager); + if( rc!=0 ){ + return rc; + } + } + pPg = pPager->lru.pFirst; + } + + assert( pPg->nRef==0 ); + + /* Write the page to the database file if it is dirty. + */ + if( pPg->dirty ){ + int rc; + assert( pPg->needSync==0 ); + makeClean(pPg); + pPg->dirty = 1; + pPg->pDirty = 0; + rc = pager_write_pagelist( pPg ); + pPg->dirty = 0; + if( rc!=SQLITE_OK ){ + return rc; + } + } + assert( pPg->dirty==0 ); + + /* If the page we are recycling is marked as alwaysRollback, then + ** set the global alwaysRollback flag, thus disabling the + ** sqlite3PagerDontRollback() optimization for the rest of this transaction. + ** It is necessary to do this because the page marked alwaysRollback + ** might be reloaded at a later time but at that point we won't remember + ** that is was marked alwaysRollback. This means that all pages must + ** be marked as alwaysRollback from here on out. + */ + if( pPg->alwaysRollback ){ + IOTRACE(("ALWAYS_ROLLBACK %p\n", pPager)) + pPager->alwaysRollback = 1; + } + + /* Unlink the old page from the free list and the hash table + */ + unlinkPage(pPg); + assert( pPg->pgno==0 ); + + *ppPg = pPg; + return SQLITE_OK; +} + +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT +/* +** This function is called to free superfluous dynamically allocated memory +** held by the pager system. Memory in use by any SQLite pager allocated +** by the current thread may be sqlite3_free()ed. +** +** nReq is the number of bytes of memory required. Once this much has +** been released, the function returns. The return value is the total number +** of bytes of memory released. +*/ +int sqlite3PagerReleaseMemory(int nReq){ + int nReleased = 0; /* Bytes of memory released so far */ + sqlite3_mutex *mutex; /* The MEM2 mutex */ + Pager *pPager; /* For looping over pagers */ + int rc = SQLITE_OK; + + /* Acquire the memory-management mutex + */ + mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MEM2); + sqlite3_mutex_enter(mutex); + + /* Signal all database connections that memory management wants + ** to have access to the pagers. + */ + for(pPager=sqlite3PagerList; pPager; pPager=pPager->pNext){ + pPager->iInUseMM = 1; + } + + while( rc==SQLITE_OK && (nReq<0 || nReleasedneedSync || pPg->pPager->iInUseDB) ){ + pPg = pPg->gfree.pNext; + } + if( !pPg ){ + pPg = sqlite3LruPageList.pFirst; + while( pPg && pPg->pPager->iInUseDB ){ + pPg = pPg->gfree.pNext; + } + } + sqlite3_mutex_leave(sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_LRU)); + + /* If pPg==0, then the block above has failed to find a page to + ** recycle. In this case return early - no further memory will + ** be released. + */ + if( !pPg ) break; + + pPager = pPg->pPager; + assert(!pPg->needSync || pPg==pPager->lru.pFirst); + assert(pPg->needSync || pPg==pPager->lru.pFirstSynced); + + rc = pager_recycle(pPager, &pRecycled); + assert(pRecycled==pPg || rc!=SQLITE_OK); + if( rc==SQLITE_OK ){ + /* We've found a page to free. 
At this point the page has been + ** removed from the page hash-table, free-list and synced-list + ** (pFirstSynced). It is still in the all pages (pAll) list. + ** Remove it from this list before freeing. + ** + ** Todo: Check the Pager.pStmt list to make sure this is Ok. It + ** probably is though. + */ + PgHdr *pTmp; + assert( pPg ); + if( pPg==pPager->pAll ){ + pPager->pAll = pPg->pNextAll; + }else{ + for( pTmp=pPager->pAll; pTmp->pNextAll!=pPg; pTmp=pTmp->pNextAll ){} + pTmp->pNextAll = pPg->pNextAll; + } + nReleased += ( + sizeof(*pPg) + pPager->pageSize + + sizeof(u32) + pPager->nExtra + + MEMDB*sizeof(PgHistory) + ); + IOTRACE(("PGFREE %p %d *\n", pPager, pPg->pgno)); + PAGER_INCR(sqlite3_pager_pgfree_count); + sqlite3_free(pPg->pData); + sqlite3_free(pPg); + pPager->nPage--; + }else{ + /* An error occured whilst writing to the database file or + ** journal in pager_recycle(). The error is not returned to the + ** caller of this function. Instead, set the Pager.errCode variable. + ** The error will be returned to the user (or users, in the case + ** of a shared pager cache) of the pager for which the error occured. + */ + assert( + (rc&0xff)==SQLITE_IOERR || + rc==SQLITE_FULL || + rc==SQLITE_BUSY + ); + assert( pPager->state>=PAGER_RESERVED ); + pager_error(pPager, rc); + } + } + + /* Clear the memory management flags and release the mutex + */ + for(pPager=sqlite3PagerList; pPager; pPager=pPager->pNext){ + pPager->iInUseMM = 0; + } + sqlite3_mutex_leave(mutex); + + /* Return the number of bytes released + */ + return nReleased; +} +#endif /* SQLITE_ENABLE_MEMORY_MANAGEMENT */ + +/* +** Read the content of page pPg out of the database file. +*/ +static int readDbPage(Pager *pPager, PgHdr *pPg, Pgno pgno){ + int rc; + i64 offset; + assert( MEMDB==0 ); + assert(pPager->fd->pMethods||pPager->tempFile); + if( !pPager->fd->pMethods ){ + return SQLITE_IOERR_SHORT_READ; + } + offset = (pgno-1)*(i64)pPager->pageSize; + rc = sqlite3OsRead(pPager->fd, PGHDR_TO_DATA(pPg), pPager->pageSize, offset); + PAGER_INCR(sqlite3_pager_readdb_count); + PAGER_INCR(pPager->nRead); + IOTRACE(("PGIN %p %d\n", pPager, pgno)); + if( pgno==1 ){ + memcpy(&pPager->dbFileVers, &((u8*)PGHDR_TO_DATA(pPg))[24], + sizeof(pPager->dbFileVers)); + } + CODEC1(pPager, PGHDR_TO_DATA(pPg), pPg->pgno, 3); + PAGERTRACE4("FETCH %d page %d hash(%08x)\n", + PAGERID(pPager), pPg->pgno, pager_pagehash(pPg)); + return rc; +} + + +/* +** This function is called to obtain the shared lock required before +** data may be read from the pager cache. If the shared lock has already +** been obtained, this function is a no-op. +** +** Immediately after obtaining the shared lock (if required), this function +** checks for a hot-journal file. If one is found, an emergency rollback +** is performed immediately. +*/ +static int pagerSharedLock(Pager *pPager){ + int rc = SQLITE_OK; + int isHot = 0; + + /* If this database is opened for exclusive access, has no outstanding + ** page references and is in an error-state, now is the chance to clear + ** the error. Discard the contents of the pager-cache and treat any + ** open journal file as a hot-journal. + */ + if( !MEMDB && pPager->exclusiveMode && pPager->nRef==0 && pPager->errCode ){ + if( pPager->journalOpen ){ + isHot = 1; + } + pager_reset(pPager); + pPager->errCode = SQLITE_OK; + } + + /* If the pager is still in an error state, do not proceed. The error + ** state will be cleared at some point in the future when all page + ** references are dropped and the cache can be discarded. 
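One detail of readDbPage() above that deserves a worked example is the cast in (pgno-1)*(i64)pPager->pageSize: without promoting one operand to 64 bits the product wraps once the offset passes 4 GB. A minimal standalone illustration, with numbers chosen only to show the wrap:

#include <stdio.h>

typedef long long i64;

int main(void){
  unsigned int pgno = 5000000;                  /* ~5 GB at 1KB pages     */
  int pageSize = 1024;
  i64 good         = (pgno-1)*(i64)pageSize;    /* 64-bit product         */
  unsigned int bad = (pgno-1)*pageSize;         /* 32-bit product wraps   */
  printf("64-bit offset: %lld\n", good);        /* 5119998976             */
  printf("32-bit offset: %u\n", bad);           /* 825031680 (wrapped)    */
  return 0;
}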
+ */ + if( pPager->errCode && pPager->errCode!=SQLITE_FULL ){ + return pPager->errCode; + } + + if( pPager->state==PAGER_UNLOCK || isHot ){ + sqlite3_vfs *pVfs = pPager->pVfs; + if( !MEMDB ){ + assert( pPager->nRef==0 ); + if( !pPager->noReadlock ){ + rc = pager_wait_on_lock(pPager, SHARED_LOCK); + if( rc!=SQLITE_OK ){ + return pager_error(pPager, rc); + } + assert( pPager->state>=SHARED_LOCK ); + } + + /* If a journal file exists, and there is no RESERVED lock on the + ** database file, then it either needs to be played back or deleted. + */ + if( hasHotJournal(pPager) || isHot ){ + /* Get an EXCLUSIVE lock on the database file. At this point it is + ** important that a RESERVED lock is not obtained on the way to the + ** EXCLUSIVE lock. If it were, another process might open the + ** database file, detect the RESERVED lock, and conclude that the + ** database is safe to read while this process is still rolling it + ** back. + ** + ** Because the intermediate RESERVED lock is not requested, the + ** second process will get to this point in the code and fail to + ** obtain it's own EXCLUSIVE lock on the database file. + */ + if( pPager->statefd, EXCLUSIVE_LOCK); + if( rc!=SQLITE_OK ){ + pager_unlock(pPager); + return pager_error(pPager, rc); + } + pPager->state = PAGER_EXCLUSIVE; + } + + /* Open the journal for reading only. Return SQLITE_BUSY if + ** we are unable to open the journal file. + ** + ** The journal file does not need to be locked itself. The + ** journal file is never open unless the main database file holds + ** a write lock, so there is never any chance of two or more + ** processes opening the journal at the same time. + ** + ** Open the journal for read/write access. This is because in + ** exclusive-access mode the file descriptor will be kept open and + ** possibly used for a transaction later on. On some systems, the + ** OsTruncate() call used in exclusive-access mode also requires + ** a read/write file handle. + */ + if( !isHot ){ + rc = SQLITE_BUSY; + if( sqlite3OsAccess(pVfs, pPager->zJournal, SQLITE_ACCESS_EXISTS) ){ + int fout = 0; + int f = SQLITE_OPEN_READWRITE|SQLITE_OPEN_MAIN_JOURNAL; + assert( !pPager->tempFile ); + rc = sqlite3OsOpen(pVfs, pPager->zJournal, pPager->jfd, f, &fout); + assert( rc!=SQLITE_OK || pPager->jfd->pMethods ); + if( fout&SQLITE_OPEN_READONLY ){ + rc = SQLITE_BUSY; + sqlite3OsClose(pPager->jfd); + } + } + } + if( rc!=SQLITE_OK ){ + pager_unlock(pPager); + return ((rc==SQLITE_NOMEM||rc==SQLITE_IOERR_NOMEM)?rc:SQLITE_BUSY); + } + pPager->journalOpen = 1; + pPager->journalStarted = 0; + pPager->journalOff = 0; + pPager->setMaster = 0; + pPager->journalHdr = 0; + + /* Playback and delete the journal. Drop the database write + ** lock and reacquire the read lock. + */ + rc = pager_playback(pPager, 1); + if( rc!=SQLITE_OK ){ + return pager_error(pPager, rc); + } + assert(pPager->state==PAGER_SHARED || + (pPager->exclusiveMode && pPager->state>PAGER_SHARED) + ); + } + + if( pPager->pAll ){ + /* The shared-lock has just been acquired on the database file + ** and there are already pages in the cache (from a previous + ** read or write transaction). Check to see if the database + ** has been modified. If the database has changed, flush the + ** cache. + ** + ** Database changes is detected by looking at 15 bytes beginning + ** at offset 24 into the file. The first 4 of these 16 bytes are + ** a 32-bit counter that is incremented with each change. The + ** other bytes change randomly with each file change when + ** a codec is in use. 
+ ** + ** There is a vanishingly small chance that a change will not be + ** detected. The chance of an undetected change is so small that + ** it can be neglected. + */ + char dbFileVers[sizeof(pPager->dbFileVers)]; + sqlite3PagerPagecount(pPager); + + if( pPager->errCode ){ + return pPager->errCode; + } + + if( pPager->dbSize>0 ){ + IOTRACE(("CKVERS %p %d\n", pPager, sizeof(dbFileVers))); + rc = sqlite3OsRead(pPager->fd, &dbFileVers, sizeof(dbFileVers), 24); + if( rc!=SQLITE_OK ){ + return rc; + } + }else{ + memset(dbFileVers, 0, sizeof(dbFileVers)); + } + + if( memcmp(pPager->dbFileVers, dbFileVers, sizeof(dbFileVers))!=0 ){ + pager_reset(pPager); + } + } + } + assert( pPager->exclusiveMode || pPager->state<=PAGER_SHARED ); + if( pPager->state==PAGER_UNLOCK ){ + pPager->state = PAGER_SHARED; + } + } + + return rc; +} + +/* +** Allocate a PgHdr object. Either create a new one or reuse +** an existing one that is not otherwise in use. +** +** A new PgHdr structure is created if any of the following are +** true: +** +** (1) We have not exceeded our maximum allocated cache size +** as set by the "PRAGMA cache_size" command. +** +** (2) There are no unused PgHdr objects available at this time. +** +** (3) This is an in-memory database. +** +** (4) There are no PgHdr objects that do not require a journal +** file sync and a sync of the journal file is currently +** prohibited. +** +** Otherwise, reuse an existing PgHdr. In other words, reuse an +** existing PgHdr if all of the following are true: +** +** (1) We have reached or exceeded the maximum cache size +** allowed by "PRAGMA cache_size". +** +** (2) There is a PgHdr available with PgHdr->nRef==0 +** +** (3) We are not in an in-memory database +** +** (4) Either there is an available PgHdr that does not need +** to be synced to disk or else disk syncing is currently +** allowed. +*/ +static int pagerAllocatePage(Pager *pPager, PgHdr **ppPg){ + int rc = SQLITE_OK; + PgHdr *pPg; + void *pData; + + /* Create a new PgHdr if any of the four conditions defined + ** above are met: */ + if( pPager->nPagemxPage + || pPager->lru.pFirst==0 + || MEMDB + || (pPager->lru.pFirstSynced==0 && pPager->doNotSync) + ){ + if( pPager->nPage>=pPager->nHash ){ + pager_resize_hash_table(pPager, + pPager->nHash<256 ? 256 : pPager->nHash*2); + if( pPager->nHash==0 ){ + rc = SQLITE_NOMEM; + goto pager_allocate_out; + } + } + pagerLeave(pPager); + pPg = sqlite3_malloc( sizeof(*pPg) + sizeof(u32) + pPager->nExtra + + MEMDB*sizeof(PgHistory) ); + if( pPg ){ + pData = sqlite3_malloc( pPager->pageSize ); + if( pData==0 ){ + sqlite3_free(pPg); + pPg = 0; + } + } + pagerEnter(pPager); + if( pPg==0 ){ + rc = SQLITE_NOMEM; + goto pager_allocate_out; + } + memset(pPg, 0, sizeof(*pPg)); + if( MEMDB ){ + memset(PGHDR_TO_HIST(pPg, pPager), 0, sizeof(PgHistory)); + } + pPg->pData = pData; + pPg->pPager = pPager; + pPg->pNextAll = pPager->pAll; + pPager->pAll = pPg; + pPager->nPage++; + }else{ + /* Recycle an existing page with a zero ref-count. */ + rc = pager_recycle(pPager, &pPg); + if( rc==SQLITE_BUSY ){ + rc = SQLITE_IOERR_BLOCKED; + } + if( rc!=SQLITE_OK ){ + goto pager_allocate_out; + } + assert( pPager->state>=SHARED_LOCK ); + assert(pPg); + } + *ppPg = pPg; + +pager_allocate_out: + return rc; +} + +/* +** Make sure we have the content for a page. If the page was +** previously acquired with noContent==1, then the content was +** just initialized to zeros instead of being read from disk. +** But now we need the real data off of disk. So make sure we +** have it. 
Read it in if we do not have it already. +*/ +static int pager_get_content(PgHdr *pPg){ + if( pPg->needRead ){ + int rc = readDbPage(pPg->pPager, pPg, pPg->pgno); + if( rc==SQLITE_OK ){ + pPg->needRead = 0; + }else{ + return rc; + } + } + return SQLITE_OK; +} + +/* +** Acquire a page. +** +** A read lock on the disk file is obtained when the first page is acquired. +** This read lock is dropped when the last page is released. +** +** This routine works for any page number greater than 0. If the database +** file is smaller than the requested page, then no actual disk +** read occurs and the memory image of the page is initialized to +** all zeros. The extra data appended to a page is always initialized +** to zeros the first time a page is loaded into memory. +** +** The acquisition might fail for several reasons. In all cases, +** an appropriate error code is returned and *ppPage is set to NULL. +** +** See also sqlite3PagerLookup(). Both this routine and Lookup() attempt +** to find a page in the in-memory cache first. If the page is not already +** in memory, this routine goes to disk to read it in whereas Lookup() +** just returns 0. This routine acquires a read-lock the first time it +** has to go to disk, and could also playback an old journal if necessary. +** Since Lookup() never goes to disk, it never has to deal with locks +** or journal files. +** +** If noContent is false, the page contents are actually read from disk. +** If noContent is true, it means that we do not care about the contents +** of the page at this time, so do not do a disk read. Just fill in the +** page content with zeros. But mark the fact that we have not read the +** content by setting the PgHdr.needRead flag. Later on, if +** sqlite3PagerWrite() is called on this page or if this routine is +** called again with noContent==0, that means that the content is needed +** and the disk read should occur at that point. +*/ +static int pagerAcquire( + Pager *pPager, /* The pager open on the database file */ + Pgno pgno, /* Page number to fetch */ + DbPage **ppPage, /* Write a pointer to the page here */ + int noContent /* Do not bother reading content from disk if true */ +){ + PgHdr *pPg; + int rc; + + assert( pPager->state==PAGER_UNLOCK || pPager->nRef>0 || pgno==1 ); + + /* The maximum page number is 2^31. Return SQLITE_CORRUPT if a page + ** number greater than this, or zero, is requested. + */ + if( pgno>PAGER_MAX_PGNO || pgno==0 || pgno==PAGER_MJ_PGNO(pPager) ){ + return SQLITE_CORRUPT_BKPT; + } + + /* Make sure we have not hit any critical errors. + */ + assert( pPager!=0 ); + *ppPage = 0; + + /* If this is the first page accessed, then get a SHARED lock + ** on the database file. pagerSharedLock() is a no-op if + ** a database lock is already held. + */ + rc = pagerSharedLock(pPager); + if( rc!=SQLITE_OK ){ + return rc; + } + assert( pPager->state!=PAGER_UNLOCK ); + + pPg = pager_lookup(pPager, pgno); + if( pPg==0 ){ + /* The requested page is not in the page cache. 
*/ + int nMax; + int h; + PAGER_INCR(pPager->nMiss); + rc = pagerAllocatePage(pPager, &pPg); + if( rc!=SQLITE_OK ){ + return rc; + } + + pPg->pgno = pgno; + assert( !MEMDB || pgno>pPager->stmtSize ); + if( pPager->aInJournal && (int)pgno<=pPager->origDbSize ){ +#if 0 + sqlite3CheckMemory(pPager->aInJournal, pgno/8); +#endif + assert( pPager->journalOpen ); + pPg->inJournal = (pPager->aInJournal[pgno/8] & (1<<(pgno&7)))!=0; + pPg->needSync = 0; + }else{ + pPg->inJournal = 0; + pPg->needSync = 0; + } + + makeClean(pPg); + pPg->nRef = 1; + REFINFO(pPg); + + pPager->nRef++; + if( pPager->nExtra>0 ){ + memset(PGHDR_TO_EXTRA(pPg, pPager), 0, pPager->nExtra); + } + nMax = sqlite3PagerPagecount(pPager); + if( pPager->errCode ){ + rc = pPager->errCode; + sqlite3PagerUnref(pPg); + return rc; + } + + /* Populate the page with data, either by reading from the database + ** file, or by setting the entire page to zero. + */ + if( nMax<(int)pgno || MEMDB || (noContent && !pPager->alwaysRollback) ){ + if( pgno>pPager->mxPgno ){ + sqlite3PagerUnref(pPg); + return SQLITE_FULL; + } + memset(PGHDR_TO_DATA(pPg), 0, pPager->pageSize); + pPg->needRead = noContent && !pPager->alwaysRollback; + IOTRACE(("ZERO %p %d\n", pPager, pgno)); + }else{ + rc = readDbPage(pPager, pPg, pgno); + if( rc!=SQLITE_OK && rc!=SQLITE_IOERR_SHORT_READ ){ + pPg->pgno = 0; + sqlite3PagerUnref(pPg); + return rc; + } + pPg->needRead = 0; + } + + /* Link the page into the page hash table */ + h = pgno & (pPager->nHash-1); + assert( pgno!=0 ); + pPg->pNextHash = pPager->aHash[h]; + pPager->aHash[h] = pPg; + if( pPg->pNextHash ){ + assert( pPg->pNextHash->pPrevHash==0 ); + pPg->pNextHash->pPrevHash = pPg; + } + +#ifdef SQLITE_CHECK_PAGES + pPg->pageHash = pager_pagehash(pPg); +#endif + }else{ + /* The requested page is in the page cache. */ + assert(pPager->nRef>0 || pgno==1); + PAGER_INCR(pPager->nHit); + if( !noContent ){ + rc = pager_get_content(pPg); + if( rc ){ + return rc; + } + } + page_ref(pPg); + } + *ppPage = pPg; + return SQLITE_OK; +} +int sqlite3PagerAcquire( + Pager *pPager, /* The pager open on the database file */ + Pgno pgno, /* Page number to fetch */ + DbPage **ppPage, /* Write a pointer to the page here */ + int noContent /* Do not bother reading content from disk if true */ +){ + int rc; + pagerEnter(pPager); + rc = pagerAcquire(pPager, pgno, ppPage, noContent); + pagerLeave(pPager); + return rc; +} + + +/* +** Acquire a page if it is already in the in-memory cache. Do +** not read the page from disk. Return a pointer to the page, +** or 0 if the page is not in cache. +** +** See also sqlite3PagerGet(). The difference between this routine +** and sqlite3PagerGet() is that _get() will go to the disk and read +** in the page if the page is not already in cache. This routine +** returns NULL if the page is not in cache or if a disk I/O error +** has ever happened. +*/ +DbPage *sqlite3PagerLookup(Pager *pPager, Pgno pgno){ + PgHdr *pPg = 0; + + assert( pPager!=0 ); + assert( pgno!=0 ); + + pagerEnter(pPager); + if( pPager->state==PAGER_UNLOCK ){ + assert( !pPager->pAll || pPager->exclusiveMode ); + }else if( pPager->errCode && pPager->errCode!=SQLITE_FULL ){ + /* Do nothing */ + }else if( (pPg = pager_lookup(pPager, pgno))!=0 ){ + page_ref(pPg); + } + pagerLeave(pPager); + return pPg; +} + +/* +** Release a page. +** +** If the number of references to the page drop to zero, then the +** page is added to the LRU list. 
When all references to all pages +** are released, a rollback occurs and the lock on the database is +** removed. +*/ +int sqlite3PagerUnref(DbPage *pPg){ + Pager *pPager = pPg->pPager; + + /* Decrement the reference count for this page + */ + assert( pPg->nRef>0 ); + pagerEnter(pPg->pPager); + pPg->nRef--; + REFINFO(pPg); + + CHECK_PAGE(pPg); + + /* When the number of references to a page reach 0, call the + ** destructor and add the page to the freelist. + */ + if( pPg->nRef==0 ){ + + lruListAdd(pPg); + if( pPager->xDestructor ){ + pPager->xDestructor(pPg, pPager->pageSize); + } + + /* When all pages reach the freelist, drop the read lock from + ** the database file. + */ + pPager->nRef--; + assert( pPager->nRef>=0 ); + if( pPager->nRef==0 && (!pPager->exclusiveMode || pPager->journalOff>0) ){ + pagerUnlockAndRollback(pPager); + } + } + pagerLeave(pPager); + return SQLITE_OK; +} + +/* +** Create a journal file for pPager. There should already be a RESERVED +** or EXCLUSIVE lock on the database file when this routine is called. +** +** Return SQLITE_OK if everything. Return an error code and release the +** write lock if anything goes wrong. +*/ +static int pager_open_journal(Pager *pPager){ + sqlite3_vfs *pVfs = pPager->pVfs; + int flags = (SQLITE_OPEN_READWRITE|SQLITE_OPEN_EXCLUSIVE|SQLITE_OPEN_CREATE); + + int rc; + assert( !MEMDB ); + assert( pPager->state>=PAGER_RESERVED ); + assert( pPager->journalOpen==0 ); + assert( pPager->useJournal ); + assert( pPager->aInJournal==0 ); + sqlite3PagerPagecount(pPager); + pagerLeave(pPager); + pPager->aInJournal = sqlite3MallocZero( pPager->dbSize/8 + 1 ); + pagerEnter(pPager); + if( pPager->aInJournal==0 ){ + rc = SQLITE_NOMEM; + goto failed_to_open_journal; + } + + if( pPager->tempFile ){ + flags |= (SQLITE_OPEN_DELETEONCLOSE|SQLITE_OPEN_TEMP_JOURNAL); + }else{ + flags |= (SQLITE_OPEN_MAIN_JOURNAL); + } +#ifdef SQLITE_ENABLE_ATOMIC_WRITE + rc = sqlite3JournalOpen( + pVfs, pPager->zJournal, pPager->jfd, flags, jrnlBufferSize(pPager) + ); +#else + rc = sqlite3OsOpen(pVfs, pPager->zJournal, pPager->jfd, flags, 0); +#endif + assert( rc!=SQLITE_OK || pPager->jfd->pMethods ); + pPager->journalOff = 0; + pPager->setMaster = 0; + pPager->journalHdr = 0; + if( rc!=SQLITE_OK ){ + if( rc==SQLITE_NOMEM ){ + sqlite3OsDelete(pVfs, pPager->zJournal, 0); + } + goto failed_to_open_journal; + } + pPager->journalOpen = 1; + pPager->journalStarted = 0; + pPager->needSync = 0; + pPager->alwaysRollback = 0; + pPager->nRec = 0; + if( pPager->errCode ){ + rc = pPager->errCode; + goto failed_to_open_journal; + } + pPager->origDbSize = pPager->dbSize; + + rc = writeJournalHdr(pPager); + + if( pPager->stmtAutoopen && rc==SQLITE_OK ){ + rc = sqlite3PagerStmtBegin(pPager); + } + if( rc!=SQLITE_OK && rc!=SQLITE_NOMEM && rc!=SQLITE_IOERR_NOMEM ){ + rc = pager_end_transaction(pPager); + if( rc==SQLITE_OK ){ + rc = SQLITE_FULL; + } + } + return rc; + +failed_to_open_journal: + sqlite3_free(pPager->aInJournal); + pPager->aInJournal = 0; + return rc; +} + +/* +** Acquire a write-lock on the database. The lock is removed when +** the any of the following happen: +** +** * sqlite3PagerCommitPhaseTwo() is called. +** * sqlite3PagerRollback() is called. +** * sqlite3PagerClose() is called. +** * sqlite3PagerUnref() is called to on every outstanding page. +** +** The first parameter to this routine is a pointer to any open page of the +** database file. 
Nothing changes about the page - it is used merely to +** acquire a pointer to the Pager structure and as proof that there is +** already a read-lock on the database. +** +** The second parameter indicates how much space in bytes to reserve for a +** master journal file-name at the start of the journal when it is created. +** +** A journal file is opened if this is not a temporary file. For temporary +** files, the opening of the journal file is deferred until there is an +** actual need to write to the journal. +** +** If the database is already reserved for writing, this routine is a no-op. +** +** If exFlag is true, go ahead and get an EXCLUSIVE lock on the file +** immediately instead of waiting until we try to flush the cache. The +** exFlag is ignored if a transaction is already active. +*/ +int sqlite3PagerBegin(DbPage *pPg, int exFlag){ + Pager *pPager = pPg->pPager; + int rc = SQLITE_OK; + pagerEnter(pPager); + assert( pPg->nRef>0 ); + assert( pPager->state!=PAGER_UNLOCK ); + if( pPager->state==PAGER_SHARED ){ + assert( pPager->aInJournal==0 ); + if( MEMDB ){ + pPager->state = PAGER_EXCLUSIVE; + pPager->origDbSize = pPager->dbSize; + }else{ + rc = sqlite3OsLock(pPager->fd, RESERVED_LOCK); + if( rc==SQLITE_OK ){ + pPager->state = PAGER_RESERVED; + if( exFlag ){ + rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK); + } + } + if( rc!=SQLITE_OK ){ + pagerLeave(pPager); + return rc; + } + pPager->dirtyCache = 0; + PAGERTRACE2("TRANSACTION %d\n", PAGERID(pPager)); + if( pPager->useJournal && !pPager->tempFile ){ + rc = pager_open_journal(pPager); + } + } + }else if( pPager->journalOpen && pPager->journalOff==0 ){ + /* This happens when the pager was in exclusive-access mode last + ** time a (read or write) transaction was successfully concluded + ** by this connection. Instead of deleting the journal file it was + ** kept open and truncated to 0 bytes. + */ + assert( pPager->nRec==0 ); + assert( pPager->origDbSize==0 ); + assert( pPager->aInJournal==0 ); + sqlite3PagerPagecount(pPager); + pagerLeave(pPager); + pPager->aInJournal = sqlite3MallocZero( pPager->dbSize/8 + 1 ); + pagerEnter(pPager); + if( !pPager->aInJournal ){ + rc = SQLITE_NOMEM; + }else{ + pPager->origDbSize = pPager->dbSize; + rc = writeJournalHdr(pPager); + } + } + assert( !pPager->journalOpen || pPager->journalOff>0 || rc!=SQLITE_OK ); + pagerLeave(pPager); + return rc; +} + +/* +** Make a page dirty. Set its dirty flag and add it to the dirty +** page list. +*/ +static void makeDirty(PgHdr *pPg){ + if( pPg->dirty==0 ){ + Pager *pPager = pPg->pPager; + pPg->dirty = 1; + pPg->pDirty = pPager->pDirty; + if( pPager->pDirty ){ + pPager->pDirty->pPrevDirty = pPg; + } + pPg->pPrevDirty = 0; + pPager->pDirty = pPg; + } +} + +/* +** Make a page clean. Clear its dirty bit and remove it from the +** dirty page list. +*/ +static void makeClean(PgHdr *pPg){ + if( pPg->dirty ){ + pPg->dirty = 0; + if( pPg->pDirty ){ + assert( pPg->pDirty->pPrevDirty==pPg ); + pPg->pDirty->pPrevDirty = pPg->pPrevDirty; + } + if( pPg->pPrevDirty ){ + assert( pPg->pPrevDirty->pDirty==pPg ); + pPg->pPrevDirty->pDirty = pPg->pDirty; + }else{ + assert( pPg->pPager->pDirty==pPg ); + pPg->pPager->pDirty = pPg->pDirty; + } + } +} + + +/* +** Mark a data page as writeable. The page is written into the journal +** if it is not there already. This routine must be called before making +** changes to a page. +** +** The first time this routine is called, the pager creates a new +** journal and acquires a RESERVED lock on the database. 
If the RESERVED +** lock could not be acquired, this routine returns SQLITE_BUSY. The +** calling routine must check for that return value and be careful not to +** change any page data until this routine returns SQLITE_OK. +** +** If the journal file could not be written because the disk is full, +** then this routine returns SQLITE_FULL and does an immediate rollback. +** All subsequent write attempts also return SQLITE_FULL until there +** is a call to sqlite3PagerCommit() or sqlite3PagerRollback() to +** reset. +*/ +static int pager_write(PgHdr *pPg){ + void *pData = PGHDR_TO_DATA(pPg); + Pager *pPager = pPg->pPager; + int rc = SQLITE_OK; + + /* Check for errors + */ + if( pPager->errCode ){ + return pPager->errCode; + } + if( pPager->readOnly ){ + return SQLITE_PERM; + } + + assert( !pPager->setMaster ); + + CHECK_PAGE(pPg); + + /* If this page was previously acquired with noContent==1, that means + ** we didn't really read in the content of the page. This can happen + ** (for example) when the page is being moved to the freelist. But + ** now we are (perhaps) moving the page off of the freelist for + ** reuse and we need to know its original content so that content + ** can be stored in the rollback journal. So do the read at this + ** time. + */ + rc = pager_get_content(pPg); + if( rc ){ + return rc; + } + + /* Mark the page as dirty. If the page has already been written + ** to the journal then we can return right away. + */ + makeDirty(pPg); + if( pPg->inJournal && (pageInStatement(pPg) || pPager->stmtInUse==0) ){ + pPager->dirtyCache = 1; + }else{ + + /* If we get this far, it means that the page needs to be + ** written to the transaction journal or the ckeckpoint journal + ** or both. + ** + ** First check to see that the transaction journal exists and + ** create it if it does not. + */ + assert( pPager->state!=PAGER_UNLOCK ); + rc = sqlite3PagerBegin(pPg, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + assert( pPager->state>=PAGER_RESERVED ); + if( !pPager->journalOpen && pPager->useJournal ){ + rc = pager_open_journal(pPager); + if( rc!=SQLITE_OK ) return rc; + } + assert( pPager->journalOpen || !pPager->useJournal ); + pPager->dirtyCache = 1; + + /* The transaction journal now exists and we have a RESERVED or an + ** EXCLUSIVE lock on the main database file. Write the current page to + ** the transaction journal if it is not there already. + */ + if( !pPg->inJournal && (pPager->useJournal || MEMDB) ){ + if( (int)pPg->pgno <= pPager->origDbSize ){ + if( MEMDB ){ + PgHistory *pHist = PGHDR_TO_HIST(pPg, pPager); + PAGERTRACE3("JOURNAL %d page %d\n", PAGERID(pPager), pPg->pgno); + assert( pHist->pOrig==0 ); + pHist->pOrig = sqlite3_malloc( pPager->pageSize ); + if( pHist->pOrig ){ + memcpy(pHist->pOrig, PGHDR_TO_DATA(pPg), pPager->pageSize); + } + }else{ + u32 cksum; + char *pData2; + + /* We should never write to the journal file the page that + ** contains the database locks. The following assert verifies + ** that we do not. 
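For orientation, each record appended to the main journal just below is a 4-byte big-endian page number, the page image, then a 4-byte checksum, which is why the journal offset advances by pageSize+8 per page (the statement journal, written further down, drops the checksum and advances by pageSize+4). The sketch uses invented helpers (put32(), journalRecord()), not SQLite's own serialisation routines.

#include <string.h>

typedef long long i64;
typedef unsigned int u32;

static void put32(unsigned char *p, u32 v){        /* big-endian 32 bits */
  p[0] = (unsigned char)(v>>24);
  p[1] = (unsigned char)(v>>16);
  p[2] = (unsigned char)(v>>8);
  p[3] = (unsigned char)(v);
}

/* Lay one rollback-journal record into buf and return how far the
** journal offset advances: 4 (pgno) + pageSize (data) + 4 (checksum). */
static i64 journalRecord(unsigned char *buf, u32 pgno,
                         const unsigned char *aData, int pageSize, u32 cksum){
  put32(buf, pgno);
  memcpy(&buf[4], aData, (size_t)pageSize);
  put32(&buf[4+pageSize], cksum);
  return 4 + (i64)pageSize + 4;
}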
*/ + assert( pPg->pgno!=PAGER_MJ_PGNO(pPager) ); + pData2 = CODEC2(pPager, pData, pPg->pgno, 7); + cksum = pager_cksum(pPager, (u8*)pData2); + rc = write32bits(pPager->jfd, pPager->journalOff, pPg->pgno); + if( rc==SQLITE_OK ){ + rc = sqlite3OsWrite(pPager->jfd, pData2, pPager->pageSize, + pPager->journalOff + 4); + pPager->journalOff += pPager->pageSize+4; + } + if( rc==SQLITE_OK ){ + rc = write32bits(pPager->jfd, pPager->journalOff, cksum); + pPager->journalOff += 4; + } + IOTRACE(("JOUT %p %d %lld %d\n", pPager, pPg->pgno, + pPager->journalOff, pPager->pageSize)); + PAGER_INCR(sqlite3_pager_writej_count); + PAGERTRACE5("JOURNAL %d page %d needSync=%d hash(%08x)\n", + PAGERID(pPager), pPg->pgno, pPg->needSync, pager_pagehash(pPg)); + + /* An error has occured writing to the journal file. The + ** transaction will be rolled back by the layer above. + */ + if( rc!=SQLITE_OK ){ + return rc; + } + + pPager->nRec++; + assert( pPager->aInJournal!=0 ); + pPager->aInJournal[pPg->pgno/8] |= 1<<(pPg->pgno&7); + pPg->needSync = !pPager->noSync; + if( pPager->stmtInUse ){ + pPager->aInStmt[pPg->pgno/8] |= 1<<(pPg->pgno&7); + } + } + }else{ + pPg->needSync = !pPager->journalStarted && !pPager->noSync; + PAGERTRACE4("APPEND %d page %d needSync=%d\n", + PAGERID(pPager), pPg->pgno, pPg->needSync); + } + if( pPg->needSync ){ + pPager->needSync = 1; + } + pPg->inJournal = 1; + } + + /* If the statement journal is open and the page is not in it, + ** then write the current page to the statement journal. Note that + ** the statement journal format differs from the standard journal format + ** in that it omits the checksums and the header. + */ + if( pPager->stmtInUse + && !pageInStatement(pPg) + && (int)pPg->pgno<=pPager->stmtSize + ){ + assert( pPg->inJournal || (int)pPg->pgno>pPager->origDbSize ); + if( MEMDB ){ + PgHistory *pHist = PGHDR_TO_HIST(pPg, pPager); + assert( pHist->pStmt==0 ); + pHist->pStmt = sqlite3_malloc( pPager->pageSize ); + if( pHist->pStmt ){ + memcpy(pHist->pStmt, PGHDR_TO_DATA(pPg), pPager->pageSize); + } + PAGERTRACE3("STMT-JOURNAL %d page %d\n", PAGERID(pPager), pPg->pgno); + page_add_to_stmt_list(pPg); + }else{ + i64 offset = pPager->stmtNRec*(4+pPager->pageSize); + char *pData2 = CODEC2(pPager, pData, pPg->pgno, 7); + rc = write32bits(pPager->stfd, offset, pPg->pgno); + if( rc==SQLITE_OK ){ + rc = sqlite3OsWrite(pPager->stfd, pData2, pPager->pageSize, offset+4); + } + PAGERTRACE3("STMT-JOURNAL %d page %d\n", PAGERID(pPager), pPg->pgno); + if( rc!=SQLITE_OK ){ + return rc; + } + pPager->stmtNRec++; + assert( pPager->aInStmt!=0 ); + pPager->aInStmt[pPg->pgno/8] |= 1<<(pPg->pgno&7); + } + } + } + + /* Update the database size and return. + */ + assert( pPager->state>=PAGER_SHARED ); + if( pPager->dbSize<(int)pPg->pgno ){ + pPager->dbSize = pPg->pgno; + if( !MEMDB && pPager->dbSize==PENDING_BYTE/pPager->pageSize ){ + pPager->dbSize++; + } + } + return rc; +} + +/* +** This function is used to mark a data-page as writable. It uses +** pager_write() to open a journal file (if it is not already open) +** and write the page *pData to the journal. +** +** The difference between this function and pager_write() is that this +** function also deals with the special case where 2 or more pages +** fit on a single disk sector. In this case all co-resident pages +** must have been written to the journal file before returning. 
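+**
+** As a rough worked example (assuming, as the code below does, that both
+** the page-size and the sector-size are powers of two): with 512 byte
+** pages and a 2048 byte sector, nPagePerSector is 4, so a write to page 7
+** rounds down to pg1 = ((7-1) & ~(4-1)) + 1 = 5, and pages 5 through 8 are
+** all journalled by this single call.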
+*/
+int sqlite3PagerWrite(DbPage *pDbPage){
+  int rc = SQLITE_OK;
+
+  PgHdr *pPg = pDbPage;
+  Pager *pPager = pPg->pPager;
+  Pgno nPagePerSector = (pPager->sectorSize/pPager->pageSize);
+
+  pagerEnter(pPager);
+  if( !MEMDB && nPagePerSector>1 ){
+    Pgno nPageCount;          /* Total number of pages in database file */
+    Pgno pg1;                 /* First page of the sector pPg is located on. */
+    int nPage;                /* Number of pages starting at pg1 to journal */
+    int ii;
+    int needSync = 0;
+
+    /* Set the doNotSync flag to 1. This is because we cannot allow a journal
+    ** header to be written between the pages journaled by this function.
+    */
+    assert( pPager->doNotSync==0 );
+    pPager->doNotSync = 1;
+
+    /* This trick assumes that both the page-size and sector-size are
+    ** an integer power of 2. It sets variable pg1 to the identifier
+    ** of the first page of the sector pPg is located on.
+    */
+    pg1 = ((pPg->pgno-1) & ~(nPagePerSector-1)) + 1;
+
+    nPageCount = sqlite3PagerPagecount(pPager);
+    if( pPg->pgno>nPageCount ){
+      nPage = (pPg->pgno - pg1)+1;
+    }else if( (pg1+nPagePerSector-1)>nPageCount ){
+      nPage = nPageCount+1-pg1;
+    }else{
+      nPage = nPagePerSector;
+    }
+    assert(nPage>0);
+    assert(pg1<=pPg->pgno);
+    assert((pg1+nPage)>pPg->pgno);
+
+    for(ii=0; ii<nPage; ii++){
+      Pgno pg = pg1+ii;
+      PgHdr *pPage;
+      if( !pPager->aInJournal || pg==pPg->pgno ||
+          pg>pPager->origDbSize || !(pPager->aInJournal[pg/8]&(1<<(pg&7)))
+      ) {
+        if( pg!=PAGER_MJ_PGNO(pPager) ){
+          rc = sqlite3PagerGet(pPager, pg, &pPage);
+          if( rc==SQLITE_OK ){
+            rc = pager_write(pPage);
+            if( pPage->needSync ){
+              needSync = 1;
+            }
+            sqlite3PagerUnref(pPage);
+          }
+        }
+      }else if( (pPage = pager_lookup(pPager, pg)) ){
+        if( pPage->needSync ){
+          needSync = 1;
+        }
+      }
+    }
+
+    /* If the PgHdr.needSync flag is set for any of the nPage pages
+    ** starting at pg1, then it needs to be set for all of them. Because
+    ** writing to any of these nPage pages may damage the others, the
+    ** journal file must contain sync()ed copies of all of them
+    ** before any of them can be written out to the database file.
+    */
+    if( needSync ){
+      for(ii=0; ii<nPage; ii++){
+        PgHdr *pPage = pager_lookup(pPager, pg1+ii);
+        if( pPage ) pPage->needSync = 1;
+      }
+      assert(pPager->needSync);
+    }
+
+    assert( pPager->doNotSync==1 );
+    pPager->doNotSync = 0;
+  }else{
+    rc = pager_write(pDbPage);
+  }
+  pagerLeave(pPager);
+  return rc;
+}
+
+/*
+** Return TRUE if the page given in the argument was previously passed
+** to sqlite3PagerWrite(). In other words, return TRUE if it is ok
+** to change the content of the page.
+*/
+#ifndef NDEBUG
+int sqlite3PagerIswriteable(DbPage *pPg){
+  return pPg->dirty;
+}
+#endif
+
+#ifndef SQLITE_OMIT_VACUUM
+/*
+** Replace the content of a single page with the information in the third
+** argument.
+*/
+int sqlite3PagerOverwrite(Pager *pPager, Pgno pgno, void *pData){
+  PgHdr *pPg;
+  int rc;
+
+  pagerEnter(pPager);
+  rc = sqlite3PagerGet(pPager, pgno, &pPg);
+  if( rc==SQLITE_OK ){
+    rc = sqlite3PagerWrite(pPg);
+    if( rc==SQLITE_OK ){
+      memcpy(sqlite3PagerGetData(pPg), pData, pPager->pageSize);
+    }
+    sqlite3PagerUnref(pPg);
+  }
+  pagerLeave(pPager);
+  return rc;
+}
+#endif
+
+/*
+** A call to this routine tells the pager that it is not necessary to
+** write the information on page pPg back to the disk, even though
+** that page might be marked as dirty.
+**
+** The overlying software layer calls this routine when all of the data
+** on the given page is unused. The pager marks the page as clean so
+** that it does not get written to disk.
+**
+** Tests show that this optimization, together with the
+** sqlite3PagerDontRollback() below, more than double the speed
+** of large INSERT operations and quadruple the speed of large DELETEs.
+**
+** When this routine is called, set the alwaysRollback flag to true.
+** Subsequent calls to sqlite3PagerDontRollback() for the same page
+** will thereafter be ignored. This is necessary to avoid a problem
+** where a page with data is added to the freelist during one part of
+** a transaction then removed from the freelist during a later part
+** of the same transaction and reused for some other purpose. When it
+** is first added to the freelist, this routine is called. When reused,
+** the sqlite3PagerDontRollback() routine is called. But because the
+** page contains critical data, we still need to be sure it gets
+** rolled back in spite of the sqlite3PagerDontRollback() call.
+*/
+void sqlite3PagerDontWrite(DbPage *pDbPage){
+  PgHdr *pPg = pDbPage;
+  Pager *pPager = pPg->pPager;
+
+  if( MEMDB ) return;
+  pagerEnter(pPager);
+  pPg->alwaysRollback = 1;
+  if( pPg->dirty && !pPager->stmtInUse ){
+    assert( pPager->state>=PAGER_SHARED );
+    if( pPager->dbSize==(int)pPg->pgno && pPager->origDbSize<pPager->dbSize ){
+      /* If this pages is the last page in the file and the file has grown
+      ** during the current transaction, then do NOT mark the page as clean.
+      ** When the database file grows, we must make sure that the last page
+      ** gets written at least once so that the disk file will be the correct
+      ** size. If you do not write this page and the size of the file
+      ** on the disk ends up being too small, that can lead to database
+      ** corruption during the next transaction.
+      */
+    }else{
+      PAGERTRACE3("DONT_WRITE page %d of %d\n", pPg->pgno, PAGERID(pPager));
+      IOTRACE(("CLEAN %p %d\n", pPager, pPg->pgno))
+      makeClean(pPg);
+#ifdef SQLITE_CHECK_PAGES
+      pPg->pageHash = pager_pagehash(pPg);
+#endif
+    }
+  }
+  pagerLeave(pPager);
+}
+
+/*
+** A call to this routine tells the pager that if a rollback occurs,
+** it is not necessary to restore the data on the given page. This
+** means that the pager does not have to record the given page in the
+** rollback journal.
+**
+** If we have not yet actually read the content of this page (if
+** the PgHdr.needRead flag is set) then this routine acts as a promise
+** that we will never need to read the page content in the future.
+** so the needRead flag can be cleared at this point.
+*/
+void sqlite3PagerDontRollback(DbPage *pPg){
+  Pager *pPager = pPg->pPager;
+
+  pagerEnter(pPager);
+  assert( pPager->state>=PAGER_RESERVED );
+  if( pPager->journalOpen==0 ) return;
+  if( pPg->alwaysRollback || pPager->alwaysRollback || MEMDB ) return;
+  if( !pPg->inJournal && (int)pPg->pgno <= pPager->origDbSize ){
+    assert( pPager->aInJournal!=0 );
+    pPager->aInJournal[pPg->pgno/8] |= 1<<(pPg->pgno&7);
+    pPg->inJournal = 1;
+    pPg->needRead = 0;
+    if( pPager->stmtInUse ){
+      pPager->aInStmt[pPg->pgno/8] |= 1<<(pPg->pgno&7);
+    }
+    PAGERTRACE3("DONT_ROLLBACK page %d of %d\n", pPg->pgno, PAGERID(pPager));
+    IOTRACE(("GARBAGE %p %d\n", pPager, pPg->pgno))
+  }
+  if( pPager->stmtInUse
+   && !pageInStatement(pPg)
+   && (int)pPg->pgno<=pPager->stmtSize
+  ){
+    assert( pPg->inJournal || (int)pPg->pgno>pPager->origDbSize );
+    assert( pPager->aInStmt!=0 );
+    pPager->aInStmt[pPg->pgno/8] |= 1<<(pPg->pgno&7);
+  }
+  pagerLeave(pPager);
+}
+
+
+/*
+** This routine is called to increment the database file change-counter,
+** stored at byte 24 of the pager file.
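+**
+** As a sketch of the layout (assuming the same big-endian encoding that
+** sqlite3Get4byte() and put32bits() use for the header fields): a counter
+** value of 0x00000102 occupies bytes 24..27 of page 1 as 00 00 01 02, and
+** could be read back with something like
+**
+**     u32 cnt = (p[24]<<24) | (p[25]<<16) | (p[26]<<8) | p[27];
+**
+** where p is an unsigned char pointer to the start of page 1.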
+*/ +static int pager_incr_changecounter(Pager *pPager, int isDirect){ + PgHdr *pPgHdr; + u32 change_counter; + int rc = SQLITE_OK; + + if( !pPager->changeCountDone ){ + /* Open page 1 of the file for writing. */ + rc = sqlite3PagerGet(pPager, 1, &pPgHdr); + if( rc!=SQLITE_OK ) return rc; + + if( !isDirect ){ + rc = sqlite3PagerWrite(pPgHdr); + if( rc!=SQLITE_OK ){ + sqlite3PagerUnref(pPgHdr); + return rc; + } + } + + /* Increment the value just read and write it back to byte 24. */ + change_counter = sqlite3Get4byte((u8*)pPager->dbFileVers); + change_counter++; + put32bits(((char*)PGHDR_TO_DATA(pPgHdr))+24, change_counter); + + if( isDirect && pPager->fd->pMethods ){ + const void *zBuf = PGHDR_TO_DATA(pPgHdr); + rc = sqlite3OsWrite(pPager->fd, zBuf, pPager->pageSize, 0); + } + + /* Release the page reference. */ + sqlite3PagerUnref(pPgHdr); + pPager->changeCountDone = 1; + } + return rc; +} + +/* +** Sync the database file for the pager pPager. zMaster points to the name +** of a master journal file that should be written into the individual +** journal file. zMaster may be NULL, which is interpreted as no master +** journal (a single database transaction). +** +** This routine ensures that the journal is synced, all dirty pages written +** to the database file and the database file synced. The only thing that +** remains to commit the transaction is to delete the journal file (or +** master journal file if specified). +** +** Note that if zMaster==NULL, this does not overwrite a previous value +** passed to an sqlite3PagerCommitPhaseOne() call. +** +** If parameter nTrunc is non-zero, then the pager file is truncated to +** nTrunc pages (this is used by auto-vacuum databases). +*/ +int sqlite3PagerCommitPhaseOne(Pager *pPager, const char *zMaster, Pgno nTrunc){ + int rc = SQLITE_OK; + + PAGERTRACE4("DATABASE SYNC: File=%s zMaster=%s nTrunc=%d\n", + pPager->zFilename, zMaster, nTrunc); + pagerEnter(pPager); + + /* If this is an in-memory db, or no pages have been written to, or this + ** function has already been called, it is a no-op. + */ + if( pPager->state!=PAGER_SYNCED && !MEMDB && pPager->dirtyCache ){ + PgHdr *pPg; + +#ifdef SQLITE_ENABLE_ATOMIC_WRITE + /* The atomic-write optimization can be used if all of the + ** following are true: + ** + ** + The file-system supports the atomic-write property for + ** blocks of size page-size, and + ** + This commit is not part of a multi-file transaction, and + ** + Exactly one page has been modified and store in the journal file. + ** + ** If the optimization can be used, then the journal file will never + ** be created for this transaction. + */ + int useAtomicWrite = ( + !zMaster && + pPager->journalOff==jrnlBufferSize(pPager) && + nTrunc==0 && + (0==pPager->pDirty || 0==pPager->pDirty->pDirty) + ); + if( useAtomicWrite ){ + /* Update the nRec field in the journal file. */ + int offset = pPager->journalHdr + sizeof(aJournalMagic); + assert(pPager->nRec==1); + rc = write32bits(pPager->jfd, offset, pPager->nRec); + + /* Update the db file change counter. The following call will modify + ** the in-memory representation of page 1 to include the updated + ** change counter and then write page 1 directly to the database + ** file. Because of the atomic-write property of the host file-system, + ** this is safe. 
+      */
+      if( rc==SQLITE_OK ){
+        rc = pager_incr_changecounter(pPager, 1);
+      }
+    }else{
+      rc = sqlite3JournalCreate(pPager->jfd);
+    }
+
+    if( !useAtomicWrite && rc==SQLITE_OK )
+#endif
+
+    /* If a master journal file name has already been written to the
+    ** journal file, then no sync is required. This happens when it is
+    ** written, then the process fails to upgrade from a RESERVED to an
+    ** EXCLUSIVE lock. The next time the process tries to commit the
+    ** transaction the m-j name will have already been written.
+    */
+    if( !pPager->setMaster ){
+      assert( pPager->journalOpen );
+      rc = pager_incr_changecounter(pPager, 0);
+      if( rc!=SQLITE_OK ) goto sync_exit;
+#ifndef SQLITE_OMIT_AUTOVACUUM
+      if( nTrunc!=0 ){
+        /* If this transaction has made the database smaller, then all pages
+        ** being discarded by the truncation must be written to the journal
+        ** file.
+        */
+        Pgno i;
+        int iSkip = PAGER_MJ_PGNO(pPager);
+        for( i=nTrunc+1; i<=pPager->origDbSize; i++ ){
+          if( !(pPager->aInJournal[i/8] & (1<<(i&7))) && i!=iSkip ){
+            rc = sqlite3PagerGet(pPager, i, &pPg);
+            if( rc!=SQLITE_OK ) goto sync_exit;
+            rc = sqlite3PagerWrite(pPg);
+            sqlite3PagerUnref(pPg);
+            if( rc!=SQLITE_OK ) goto sync_exit;
+          }
+        }
+      }
+#endif
+      rc = writeMasterJournal(pPager, zMaster);
+      if( rc!=SQLITE_OK ) goto sync_exit;
+      rc = syncJournal(pPager);
+    }
+    if( rc!=SQLITE_OK ) goto sync_exit;
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+    if( nTrunc!=0 ){
+      rc = sqlite3PagerTruncate(pPager, nTrunc);
+      if( rc!=SQLITE_OK ) goto sync_exit;
+    }
+#endif
+
+    /* Write all dirty pages to the database file */
+    pPg = pager_get_all_dirty_pages(pPager);
+    rc = pager_write_pagelist(pPg);
+    if( rc!=SQLITE_OK ){
+      while( pPg && !pPg->dirty ){ pPg = pPg->pDirty; }
+      pPager->pDirty = pPg;
+      goto sync_exit;
+    }
+    pPager->pDirty = 0;
+
+    /* Sync the database file. */
+    if( !pPager->noSync ){
+      rc = sqlite3OsSync(pPager->fd, pPager->sync_flags);
+    }
+    IOTRACE(("DBSYNC %p\n", pPager))
+
+    pPager->state = PAGER_SYNCED;
+  }else if( MEMDB && nTrunc!=0 ){
+    rc = sqlite3PagerTruncate(pPager, nTrunc);
+  }
+
+sync_exit:
+  if( rc==SQLITE_IOERR_BLOCKED ){
+    /* pager_incr_changecounter() may attempt to obtain an exclusive
+     * lock to spill the cache and return IOERR_BLOCKED. But since
+     * there is no chance the cache is inconsistent, it's
+     * better to return SQLITE_BUSY.
+     */
+    rc = SQLITE_BUSY;
+  }
+  pagerLeave(pPager);
+  return rc;
+}
+
+
+/*
+** Commit all changes to the database and release the write lock.
+**
+** If the commit fails for any reason, a rollback attempt is made
+** and an error code is returned. If the commit worked, SQLITE_OK
+** is returned.
+*/
+int sqlite3PagerCommitPhaseTwo(Pager *pPager){
+  int rc;
+  PgHdr *pPg;
+
+  if( pPager->errCode ){
+    return pPager->errCode;
+  }
+  if( pPager->state<PAGER_RESERVED ){
+    return SQLITE_ERROR;
+  }
+  pagerEnter(pPager);
+  PAGERTRACE2("COMMIT %d\n", PAGERID(pPager));
+  if( MEMDB ){
+    pPg = pager_get_all_dirty_pages(pPager);
+    while( pPg ){
+      PgHistory *pHist = PGHDR_TO_HIST(pPg, pPager);
+      clearHistory(pHist);
+      pPg->dirty = 0;
+      pPg->inJournal = 0;
+      pHist->inStmt = 0;
+      pPg->needSync = 0;
+      pHist->pPrevStmt = pHist->pNextStmt = 0;
+      pPg = pPg->pDirty;
+    }
+    pPager->pDirty = 0;
+#ifndef NDEBUG
+    for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){
+      PgHistory *pHist = PGHDR_TO_HIST(pPg, pPager);
+      assert( !pPg->alwaysRollback );
+      assert( !pHist->pOrig );
+      assert( !pHist->pStmt );
+    }
+#endif
+    pPager->pStmt = 0;
+    pPager->state = PAGER_SHARED;
+    return SQLITE_OK;
+  }
+  assert( pPager->journalOpen || !pPager->dirtyCache );
+  assert( pPager->state==PAGER_SYNCED || !pPager->dirtyCache );
+  rc = pager_end_transaction(pPager);
+  rc = pager_error(pPager, rc);
+  pagerLeave(pPager);
+  return rc;
+}
+
+/*
+** Rollback all changes. The database falls back to PAGER_SHARED mode.
+** All in-memory cache pages revert to their original data contents. +** The journal is deleted. +** +** This routine cannot fail unless some other process is not following +** the correct locking protocol or unless some other +** process is writing trash into the journal file (SQLITE_CORRUPT) or +** unless a prior malloc() failed (SQLITE_NOMEM). Appropriate error +** codes are returned for all these occasions. Otherwise, +** SQLITE_OK is returned. +*/ +int sqlite3PagerRollback(Pager *pPager){ + int rc; + PAGERTRACE2("ROLLBACK %d\n", PAGERID(pPager)); + if( MEMDB ){ + PgHdr *p; + for(p=pPager->pAll; p; p=p->pNextAll){ + PgHistory *pHist; + assert( !p->alwaysRollback ); + if( !p->dirty ){ + assert( !((PgHistory *)PGHDR_TO_HIST(p, pPager))->pOrig ); + assert( !((PgHistory *)PGHDR_TO_HIST(p, pPager))->pStmt ); + continue; + } + + pHist = PGHDR_TO_HIST(p, pPager); + if( pHist->pOrig ){ + memcpy(PGHDR_TO_DATA(p), pHist->pOrig, pPager->pageSize); + PAGERTRACE3("ROLLBACK-PAGE %d of %d\n", p->pgno, PAGERID(pPager)); + }else{ + PAGERTRACE3("PAGE %d is clean on %d\n", p->pgno, PAGERID(pPager)); + } + clearHistory(pHist); + p->dirty = 0; + p->inJournal = 0; + pHist->inStmt = 0; + pHist->pPrevStmt = pHist->pNextStmt = 0; + if( pPager->xReiniter ){ + pPager->xReiniter(p, pPager->pageSize); + } + } + pPager->pDirty = 0; + pPager->pStmt = 0; + pPager->dbSize = pPager->origDbSize; + pager_truncate_cache(pPager); + pPager->stmtInUse = 0; + pPager->state = PAGER_SHARED; + return SQLITE_OK; + } + + pagerEnter(pPager); + if( !pPager->dirtyCache || !pPager->journalOpen ){ + rc = pager_end_transaction(pPager); + pagerLeave(pPager); + return rc; + } + + if( pPager->errCode && pPager->errCode!=SQLITE_FULL ){ + if( pPager->state>=PAGER_EXCLUSIVE ){ + pager_playback(pPager, 0); + } + pagerLeave(pPager); + return pPager->errCode; + } + if( pPager->state==PAGER_RESERVED ){ + int rc2; + rc = pager_playback(pPager, 0); + rc2 = pager_end_transaction(pPager); + if( rc==SQLITE_OK ){ + rc = rc2; + } + }else{ + rc = pager_playback(pPager, 0); + } + /* pager_reset(pPager); */ + pPager->dbSize = -1; + + /* If an error occurs during a ROLLBACK, we can no longer trust the pager + ** cache. So call pager_error() on the way out to make any error + ** persistent. + */ + rc = pager_error(pPager, rc); + pagerLeave(pPager); + return rc; +} + +/* +** Return TRUE if the database file is opened read-only. Return FALSE +** if the database is (in theory) writable. +*/ +int sqlite3PagerIsreadonly(Pager *pPager){ + return pPager->readOnly; +} + +/* +** Return the number of references to the pager. +*/ +int sqlite3PagerRefcount(Pager *pPager){ + return pPager->nRef; +} + +#ifdef SQLITE_TEST +/* +** This routine is used for testing and analysis only. +*/ +int *sqlite3PagerStats(Pager *pPager){ + static int a[11]; + a[0] = pPager->nRef; + a[1] = pPager->nPage; + a[2] = pPager->mxPage; + a[3] = pPager->dbSize; + a[4] = pPager->state; + a[5] = pPager->errCode; + a[6] = pPager->nHit; + a[7] = pPager->nMiss; + a[8] = 0; /* Used to be pPager->nOvfl */ + a[9] = pPager->nRead; + a[10] = pPager->nWrite; + return a; +} +#endif + +/* +** Set the statement rollback point. +** +** This routine should be called with the transaction journal already +** open. A new statement journal is created that can be used to rollback +** changes of a single SQL command within a larger transaction. 
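+**
+** (For orientation: as the statement-journal branch of pager_write() above
+** shows, each record in this journal is just a 4-byte page number followed
+** by the page image, with no journal header and no per-page checksum, so
+** record i for a page size of pgsz starts at byte offset i*(4+pgsz).)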
+*/ +static int pagerStmtBegin(Pager *pPager){ + int rc; + assert( !pPager->stmtInUse ); + assert( pPager->state>=PAGER_SHARED ); + assert( pPager->dbSize>=0 ); + PAGERTRACE2("STMT-BEGIN %d\n", PAGERID(pPager)); + if( MEMDB ){ + pPager->stmtInUse = 1; + pPager->stmtSize = pPager->dbSize; + return SQLITE_OK; + } + if( !pPager->journalOpen ){ + pPager->stmtAutoopen = 1; + return SQLITE_OK; + } + assert( pPager->journalOpen ); + pagerLeave(pPager); + assert( pPager->aInStmt==0 ); + pPager->aInStmt = sqlite3MallocZero( pPager->dbSize/8 + 1 ); + pagerEnter(pPager); + if( pPager->aInStmt==0 ){ + /* sqlite3OsLock(pPager->fd, SHARED_LOCK); */ + return SQLITE_NOMEM; + } +#ifndef NDEBUG + rc = sqlite3OsFileSize(pPager->jfd, &pPager->stmtJSize); + if( rc ) goto stmt_begin_failed; + assert( pPager->stmtJSize == pPager->journalOff ); +#endif + pPager->stmtJSize = pPager->journalOff; + pPager->stmtSize = pPager->dbSize; + pPager->stmtHdrOff = 0; + pPager->stmtCksum = pPager->cksumInit; + if( !pPager->stmtOpen ){ + rc = sqlite3PagerOpentemp(pPager->pVfs, pPager->stfd, pPager->zStmtJrnl, + SQLITE_OPEN_SUBJOURNAL); + if( rc ){ + goto stmt_begin_failed; + } + pPager->stmtOpen = 1; + pPager->stmtNRec = 0; + } + pPager->stmtInUse = 1; + return SQLITE_OK; + +stmt_begin_failed: + if( pPager->aInStmt ){ + sqlite3_free(pPager->aInStmt); + pPager->aInStmt = 0; + } + return rc; +} +int sqlite3PagerStmtBegin(Pager *pPager){ + int rc; + pagerEnter(pPager); + rc = pagerStmtBegin(pPager); + pagerLeave(pPager); + return rc; +} + +/* +** Commit a statement. +*/ +int sqlite3PagerStmtCommit(Pager *pPager){ + pagerEnter(pPager); + if( pPager->stmtInUse ){ + PgHdr *pPg, *pNext; + PAGERTRACE2("STMT-COMMIT %d\n", PAGERID(pPager)); + if( !MEMDB ){ + /* sqlite3OsTruncate(pPager->stfd, 0); */ + sqlite3_free( pPager->aInStmt ); + pPager->aInStmt = 0; + }else{ + for(pPg=pPager->pStmt; pPg; pPg=pNext){ + PgHistory *pHist = PGHDR_TO_HIST(pPg, pPager); + pNext = pHist->pNextStmt; + assert( pHist->inStmt ); + pHist->inStmt = 0; + pHist->pPrevStmt = pHist->pNextStmt = 0; + sqlite3_free(pHist->pStmt); + pHist->pStmt = 0; + } + } + pPager->stmtNRec = 0; + pPager->stmtInUse = 0; + pPager->pStmt = 0; + } + pPager->stmtAutoopen = 0; + pagerLeave(pPager); + return SQLITE_OK; +} + +/* +** Rollback a statement. +*/ +int sqlite3PagerStmtRollback(Pager *pPager){ + int rc; + pagerEnter(pPager); + if( pPager->stmtInUse ){ + PAGERTRACE2("STMT-ROLLBACK %d\n", PAGERID(pPager)); + if( MEMDB ){ + PgHdr *pPg; + PgHistory *pHist; + for(pPg=pPager->pStmt; pPg; pPg=pHist->pNextStmt){ + pHist = PGHDR_TO_HIST(pPg, pPager); + if( pHist->pStmt ){ + memcpy(PGHDR_TO_DATA(pPg), pHist->pStmt, pPager->pageSize); + sqlite3_free(pHist->pStmt); + pHist->pStmt = 0; + } + } + pPager->dbSize = pPager->stmtSize; + pager_truncate_cache(pPager); + rc = SQLITE_OK; + }else{ + rc = pager_stmt_playback(pPager); + } + sqlite3PagerStmtCommit(pPager); + }else{ + rc = SQLITE_OK; + } + pPager->stmtAutoopen = 0; + pagerLeave(pPager); + return rc; +} + +/* +** Return the full pathname of the database file. +*/ +const char *sqlite3PagerFilename(Pager *pPager){ + return pPager->zFilename; +} + +/* +** Return the VFS structure for the pager. +*/ +const sqlite3_vfs *sqlite3PagerVfs(Pager *pPager){ + return pPager->pVfs; +} + +/* +** Return the file handle for the database file associated +** with the pager. This might return NULL if the file has +** not yet been opened. 
+*/ +sqlite3_file *sqlite3PagerFile(Pager *pPager){ + return pPager->fd; +} + +/* +** Return the directory of the database file. +*/ +const char *sqlite3PagerDirname(Pager *pPager){ + return pPager->zDirectory; +} + +/* +** Return the full pathname of the journal file. +*/ +const char *sqlite3PagerJournalname(Pager *pPager){ + return pPager->zJournal; +} + +/* +** Return true if fsync() calls are disabled for this pager. Return FALSE +** if fsync()s are executed normally. +*/ +int sqlite3PagerNosync(Pager *pPager){ + return pPager->noSync; +} + +#ifdef SQLITE_HAS_CODEC +/* +** Set the codec for this pager +*/ +void sqlite3PagerSetCodec( + Pager *pPager, + void *(*xCodec)(void*,void*,Pgno,int), + void *pCodecArg +){ + pPager->xCodec = xCodec; + pPager->pCodecArg = pCodecArg; +} +#endif + +#ifndef SQLITE_OMIT_AUTOVACUUM +/* +** Move the page pPg to location pgno in the file. +** +** There must be no references to the page previously located at +** pgno (which we call pPgOld) though that page is allowed to be +** in cache. If the page previous located at pgno is not already +** in the rollback journal, it is not put there by by this routine. +** +** References to the page pPg remain valid. Updating any +** meta-data associated with pPg (i.e. data stored in the nExtra bytes +** allocated along with the page) is the responsibility of the caller. +** +** A transaction must be active when this routine is called. It used to be +** required that a statement transaction was not active, but this restriction +** has been removed (CREATE INDEX needs to move a page when a statement +** transaction is active). +*/ +int sqlite3PagerMovepage(Pager *pPager, DbPage *pPg, Pgno pgno){ + PgHdr *pPgOld; /* The page being overwritten. */ + int h; + Pgno needSyncPgno = 0; + + pagerEnter(pPager); + assert( pPg->nRef>0 ); + + PAGERTRACE5("MOVE %d page %d (needSync=%d) moves to %d\n", + PAGERID(pPager), pPg->pgno, pPg->needSync, pgno); + IOTRACE(("MOVE %p %d %d\n", pPager, pPg->pgno, pgno)) + + pager_get_content(pPg); + if( pPg->needSync ){ + needSyncPgno = pPg->pgno; + assert( pPg->inJournal || (int)pgno>pPager->origDbSize ); + assert( pPg->dirty ); + assert( pPager->needSync ); + } + + /* Unlink pPg from it's hash-chain */ + unlinkHashChain(pPager, pPg); + + /* If the cache contains a page with page-number pgno, remove it + ** from it's hash chain. Also, if the PgHdr.needSync was set for + ** page pgno before the 'move' operation, it needs to be retained + ** for the page moved there. + */ + pPg->needSync = 0; + pPgOld = pager_lookup(pPager, pgno); + if( pPgOld ){ + assert( pPgOld->nRef==0 ); + unlinkHashChain(pPager, pPgOld); + makeClean(pPgOld); + pPg->needSync = pPgOld->needSync; + }else{ + pPg->needSync = 0; + } + if( pPager->aInJournal && (int)pgno<=pPager->origDbSize ){ + pPg->inJournal = (pPager->aInJournal[pgno/8] & (1<<(pgno&7)))!=0; + }else{ + pPg->inJournal = 0; + assert( pPg->needSync==0 || (int)pgno>pPager->origDbSize ); + } + + /* Change the page number for pPg and insert it into the new hash-chain. */ + assert( pgno!=0 ); + pPg->pgno = pgno; + h = pgno & (pPager->nHash-1); + if( pPager->aHash[h] ){ + assert( pPager->aHash[h]->pPrevHash==0 ); + pPager->aHash[h]->pPrevHash = pPg; + } + pPg->pNextHash = pPager->aHash[h]; + pPager->aHash[h] = pPg; + pPg->pPrevHash = 0; + + makeDirty(pPg); + pPager->dirtyCache = 1; + + if( needSyncPgno ){ + /* If needSyncPgno is non-zero, then the journal file needs to be + ** sync()ed before any data is written to database file page needSyncPgno. 
+ ** Currently, no such page exists in the page-cache and the + ** Pager.aInJournal bit has been set. This needs to be remedied by loading + ** the page into the pager-cache and setting the PgHdr.needSync flag. + ** + ** The sqlite3PagerGet() call may cause the journal to sync. So make + ** sure the Pager.needSync flag is set too. + */ + int rc; + PgHdr *pPgHdr; + assert( pPager->needSync ); + rc = sqlite3PagerGet(pPager, needSyncPgno, &pPgHdr); + if( rc!=SQLITE_OK ) return rc; + pPager->needSync = 1; + pPgHdr->needSync = 1; + pPgHdr->inJournal = 1; + makeDirty(pPgHdr); + sqlite3PagerUnref(pPgHdr); + } + + pagerLeave(pPager); + return SQLITE_OK; +} +#endif + +/* +** Return a pointer to the data for the specified page. +*/ +void *sqlite3PagerGetData(DbPage *pPg){ + return PGHDR_TO_DATA(pPg); +} + +/* +** Return a pointer to the Pager.nExtra bytes of "extra" space +** allocated along with the specified page. +*/ +void *sqlite3PagerGetExtra(DbPage *pPg){ + Pager *pPager = pPg->pPager; + return (pPager?PGHDR_TO_EXTRA(pPg, pPager):0); +} + +/* +** Get/set the locking-mode for this pager. Parameter eMode must be one +** of PAGER_LOCKINGMODE_QUERY, PAGER_LOCKINGMODE_NORMAL or +** PAGER_LOCKINGMODE_EXCLUSIVE. If the parameter is not _QUERY, then +** the locking-mode is set to the value specified. +** +** The returned value is either PAGER_LOCKINGMODE_NORMAL or +** PAGER_LOCKINGMODE_EXCLUSIVE, indicating the current (possibly updated) +** locking-mode. +*/ +int sqlite3PagerLockingMode(Pager *pPager, int eMode){ + assert( eMode==PAGER_LOCKINGMODE_QUERY + || eMode==PAGER_LOCKINGMODE_NORMAL + || eMode==PAGER_LOCKINGMODE_EXCLUSIVE ); + assert( PAGER_LOCKINGMODE_QUERY<0 ); + assert( PAGER_LOCKINGMODE_NORMAL>=0 && PAGER_LOCKINGMODE_EXCLUSIVE>=0 ); + if( eMode>=0 && !pPager->tempFile ){ + pPager->exclusiveMode = eMode; + } + return (int)pPager->exclusiveMode; +} + +#ifdef SQLITE_DEBUG +/* +** Print a listing of all referenced pages and their ref count. +*/ +void sqlite3PagerRefdump(Pager *pPager){ + PgHdr *pPg; + for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){ + if( pPg->nRef<=0 ) continue; + sqlite3DebugPrintf("PAGE %3d addr=%p nRef=%d\n", + pPg->pgno, PGHDR_TO_DATA(pPg), pPg->nRef); + } +} +#endif + +#endif /* SQLITE_OMIT_DISKIO */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/pager.h b/libraries/sqlite/unix/sqlite-3.5.1/src/pager.h new file mode 100644 index 0000000000..cf05b11bee --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/pager.h @@ -0,0 +1,125 @@ +/* +** 2001 September 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This header file defines the interface that the sqlite page cache +** subsystem. The page cache subsystem reads and writes a file a page +** at a time and provides a journal for rollback. +** +** @(#) $Id: pager.h,v 1.67 2007/09/03 15:19:35 drh Exp $ +*/ + +#ifndef _PAGER_H_ +#define _PAGER_H_ + +/* +** The type used to represent a page number. The first page in a file +** is called page 1. 0 is used to represent "not a page". +*/ +typedef unsigned int Pgno; + +/* +** Each open file is managed by a separate instance of the "Pager" structure. +*/ +typedef struct Pager Pager; + +/* +** Handle type for pages. 
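+**
+** A rough sketch of how a client such as the btree layer drives a write
+** through the routines declared below (setup of pPager/pgno and all error
+** handling omitted; the real call sequence is more involved):
+**
+**     DbPage *pPg;
+**     int rc;
+**     rc = sqlite3PagerGet(pPager, pgno, &pPg);       /* load the page     */
+**     rc = sqlite3PagerWrite(pPg);                    /* journal the page  */
+**     memcpy(sqlite3PagerGetData(pPg), aNew, nNew);   /* modify contents   */
+**     sqlite3PagerUnref(pPg);
+**     rc = sqlite3PagerCommitPhaseOne(pPager, 0, 0);  /* journal + db sync */
+**     rc = sqlite3PagerCommitPhaseTwo(pPager);        /* finalize commit   */
+**
+** where aNew/nNew stand in for the new page content.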
+*/ +typedef struct PgHdr DbPage; + +/* +** Allowed values for the flags parameter to sqlite3PagerOpen(). +** +** NOTE: This values must match the corresponding BTREE_ values in btree.h. +*/ +#define PAGER_OMIT_JOURNAL 0x0001 /* Do not use a rollback journal */ +#define PAGER_NO_READLOCK 0x0002 /* Omit readlocks on readonly files */ + +/* +** Valid values for the second argument to sqlite3PagerLockingMode(). +*/ +#define PAGER_LOCKINGMODE_QUERY -1 +#define PAGER_LOCKINGMODE_NORMAL 0 +#define PAGER_LOCKINGMODE_EXCLUSIVE 1 + +/* +** See source code comments for a detailed description of the following +** routines: +*/ +int sqlite3PagerOpen(sqlite3_vfs *, Pager **ppPager, const char*, int,int,int); +void sqlite3PagerSetBusyhandler(Pager*, BusyHandler *pBusyHandler); +void sqlite3PagerSetDestructor(Pager*, void(*)(DbPage*,int)); +void sqlite3PagerSetReiniter(Pager*, void(*)(DbPage*,int)); +int sqlite3PagerSetPagesize(Pager*, u16*); +int sqlite3PagerMaxPageCount(Pager*, int); +int sqlite3PagerReadFileheader(Pager*, int, unsigned char*); +void sqlite3PagerSetCachesize(Pager*, int); +int sqlite3PagerClose(Pager *pPager); +int sqlite3PagerAcquire(Pager *pPager, Pgno pgno, DbPage **ppPage, int clrFlag); +#define sqlite3PagerGet(A,B,C) sqlite3PagerAcquire(A,B,C,0) +DbPage *sqlite3PagerLookup(Pager *pPager, Pgno pgno); +int sqlite3PagerRef(DbPage*); +int sqlite3PagerUnref(DbPage*); +int sqlite3PagerWrite(DbPage*); +int sqlite3PagerOverwrite(Pager *pPager, Pgno pgno, void*); +int sqlite3PagerPagecount(Pager*); +int sqlite3PagerTruncate(Pager*,Pgno); +int sqlite3PagerBegin(DbPage*, int exFlag); +int sqlite3PagerCommitPhaseOne(Pager*,const char *zMaster, Pgno); +int sqlite3PagerCommitPhaseTwo(Pager*); +int sqlite3PagerRollback(Pager*); +int sqlite3PagerIsreadonly(Pager*); +int sqlite3PagerStmtBegin(Pager*); +int sqlite3PagerStmtCommit(Pager*); +int sqlite3PagerStmtRollback(Pager*); +void sqlite3PagerDontRollback(DbPage*); +void sqlite3PagerDontWrite(DbPage*); +int sqlite3PagerRefcount(Pager*); +void sqlite3PagerSetSafetyLevel(Pager*,int,int); +const char *sqlite3PagerFilename(Pager*); +const sqlite3_vfs *sqlite3PagerVfs(Pager*); +sqlite3_file *sqlite3PagerFile(Pager*); +const char *sqlite3PagerDirname(Pager*); +const char *sqlite3PagerJournalname(Pager*); +int sqlite3PagerNosync(Pager*); +int sqlite3PagerMovepage(Pager*,DbPage*,Pgno); +void *sqlite3PagerGetData(DbPage *); +void *sqlite3PagerGetExtra(DbPage *); +int sqlite3PagerLockingMode(Pager *, int); + +#if defined(SQLITE_ENABLE_MEMORY_MANAGEMENT) && !defined(SQLITE_OMIT_DISKIO) + int sqlite3PagerReleaseMemory(int); +#endif + +#ifdef SQLITE_HAS_CODEC + void sqlite3PagerSetCodec(Pager*,void*(*)(void*,void*,Pgno,int),void*); +#endif + +#if !defined(NDEBUG) || defined(SQLITE_TEST) + Pgno sqlite3PagerPagenumber(DbPage*); + int sqlite3PagerIswriteable(DbPage*); +#endif + +#ifdef SQLITE_TEST + int *sqlite3PagerStats(Pager*); + void sqlite3PagerRefdump(Pager*); + int pager3_refinfo_enable; +#endif + +#ifdef SQLITE_TEST +void disable_simulated_io_errors(void); +void enable_simulated_io_errors(void); +#else +# define disable_simulated_io_errors() +# define enable_simulated_io_errors() +#endif + +#endif /* _PAGER_H_ */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/parse.y b/libraries/sqlite/unix/sqlite-3.5.1/src/parse.y new file mode 100644 index 0000000000..5077daf31b --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/parse.y @@ -0,0 +1,1114 @@ +/* +** 2001 September 15 +** +** The author disclaims copyright to this source code. 
In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains SQLite's grammar for SQL. Process this file +** using the lemon parser generator to generate C code that runs +** the parser. Lemon will also generate a header file containing +** numeric codes for all of the tokens. +** +** @(#) $Id: parse.y,v 1.234 2007/08/21 10:44:16 drh Exp $ +*/ + +// All token codes are small integers with #defines that begin with "TK_" +%token_prefix TK_ + +// The type of the data attached to each token is Token. This is also the +// default type for non-terminals. +// +%token_type {Token} +%default_type {Token} + +// The generated parser function takes a 4th argument as follows: +%extra_argument {Parse *pParse} + +// This code runs whenever there is a syntax error +// +%syntax_error { + if( !pParse->parseError ){ + if( TOKEN.z[0] ){ + sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &TOKEN); + }else{ + sqlite3ErrorMsg(pParse, "incomplete SQL statement"); + } + pParse->parseError = 1; + } +} +%stack_overflow { + sqlite3ErrorMsg(pParse, "parser stack overflow"); + pParse->parseError = 1; +} + +// The name of the generated procedure that implements the parser +// is as follows: +%name sqlite3Parser + +// The following text is included near the beginning of the C source +// code file that implements the parser. +// +%include { +#include "sqliteInt.h" + +/* +** An instance of this structure holds information about the +** LIMIT clause of a SELECT statement. +*/ +struct LimitVal { + Expr *pLimit; /* The LIMIT expression. NULL if there is no limit */ + Expr *pOffset; /* The OFFSET expression. NULL if there is none */ +}; + +/* +** An instance of this structure is used to store the LIKE, +** GLOB, NOT LIKE, and NOT GLOB operators. +*/ +struct LikeOp { + Token eOperator; /* "like" or "glob" or "regexp" */ + int not; /* True if the NOT keyword is present */ +}; + +/* +** An instance of the following structure describes the event of a +** TRIGGER. "a" is the event type, one of TK_UPDATE, TK_INSERT, +** TK_DELETE, or TK_INSTEAD. If the event is of the form +** +** UPDATE ON (a,b,c) +** +** Then the "b" IdList records the list "a,b,c". +*/ +struct TrigEvent { int a; IdList * b; }; + +/* +** An instance of this structure holds the ATTACH key and the key type. +*/ +struct AttachKey { int type; Token key; }; + +} // end %include + +// Input is a single SQL command +input ::= cmdlist. +cmdlist ::= cmdlist ecmd. +cmdlist ::= ecmd. +cmdx ::= cmd. { sqlite3FinishCoding(pParse); } +ecmd ::= SEMI. +ecmd ::= explain cmdx SEMI. +explain ::= . { sqlite3BeginParse(pParse, 0); } +%ifndef SQLITE_OMIT_EXPLAIN +explain ::= EXPLAIN. { sqlite3BeginParse(pParse, 1); } +explain ::= EXPLAIN QUERY PLAN. { sqlite3BeginParse(pParse, 2); } +%endif SQLITE_OMIT_EXPLAIN + +///////////////////// Begin and end transactions. //////////////////////////// +// + +cmd ::= BEGIN transtype(Y) trans_opt. {sqlite3BeginTransaction(pParse, Y);} +trans_opt ::= . +trans_opt ::= TRANSACTION. +trans_opt ::= TRANSACTION nm. +%type transtype {int} +transtype(A) ::= . {A = TK_DEFERRED;} +transtype(A) ::= DEFERRED(X). {A = @X;} +transtype(A) ::= IMMEDIATE(X). {A = @X;} +transtype(A) ::= EXCLUSIVE(X). {A = @X;} +cmd ::= COMMIT trans_opt. {sqlite3CommitTransaction(pParse);} +cmd ::= END trans_opt. 
{sqlite3CommitTransaction(pParse);} +cmd ::= ROLLBACK trans_opt. {sqlite3RollbackTransaction(pParse);} + +///////////////////// The CREATE TABLE statement //////////////////////////// +// +cmd ::= create_table create_table_args. +create_table ::= CREATE temp(T) TABLE ifnotexists(E) nm(Y) dbnm(Z). { + sqlite3StartTable(pParse,&Y,&Z,T,0,0,E); +} +%type ifnotexists {int} +ifnotexists(A) ::= . {A = 0;} +ifnotexists(A) ::= IF NOT EXISTS. {A = 1;} +%type temp {int} +%ifndef SQLITE_OMIT_TEMPDB +temp(A) ::= TEMP. {A = 1;} +%endif SQLITE_OMIT_TEMPDB +temp(A) ::= . {A = 0;} +create_table_args ::= LP columnlist conslist_opt(X) RP(Y). { + sqlite3EndTable(pParse,&X,&Y,0); +} +create_table_args ::= AS select(S). { + sqlite3EndTable(pParse,0,0,S); + sqlite3SelectDelete(S); +} +columnlist ::= columnlist COMMA column. +columnlist ::= column. + +// A "column" is a complete description of a single column in a +// CREATE TABLE statement. This includes the column name, its +// datatype, and other keywords such as PRIMARY KEY, UNIQUE, REFERENCES, +// NOT NULL and so forth. +// +column(A) ::= columnid(X) type carglist. { + A.z = X.z; + A.n = (pParse->sLastToken.z-X.z) + pParse->sLastToken.n; +} +columnid(A) ::= nm(X). { + sqlite3AddColumn(pParse,&X); + A = X; +} + + +// An IDENTIFIER can be a generic identifier, or one of several +// keywords. Any non-standard keyword can also be an identifier. +// +%type id {Token} +id(A) ::= ID(X). {A = X;} + +// The following directive causes tokens ABORT, AFTER, ASC, etc. to +// fallback to ID if they will not parse as their original value. +// This obviates the need for the "id" nonterminal. +// +%fallback ID + ABORT AFTER ANALYZE ASC ATTACH BEFORE BEGIN CASCADE CAST CONFLICT + DATABASE DEFERRED DESC DETACH EACH END EXCLUSIVE EXPLAIN FAIL FOR + IGNORE IMMEDIATE INITIALLY INSTEAD LIKE_KW MATCH PLAN + QUERY KEY OF OFFSET PRAGMA RAISE REPLACE RESTRICT ROW + TEMP TRIGGER VACUUM VIEW VIRTUAL +%ifdef SQLITE_OMIT_COMPOUND_SELECT + EXCEPT INTERSECT UNION +%endif SQLITE_OMIT_COMPOUND_SELECT + REINDEX RENAME CTIME_KW IF + . +%wildcard ANY. + +// Define operator precedence early so that this is the first occurance +// of the operator tokens in the grammer. Keeping the operators together +// causes them to be assigned integer values that are close together, +// which keeps parser tables smaller. +// +// The token values assigned to these symbols is determined by the order +// in which lemon first sees them. It must be the case that ISNULL/NOTNULL, +// NE/EQ, GT/LE, and GE/LT are separated by only a single value. See +// the sqlite3ExprIfFalse() routine for additional information on this +// constraint. +// +%left OR. +%left AND. +%right NOT. +%left IS MATCH LIKE_KW BETWEEN IN ISNULL NOTNULL NE EQ. +%left GT LE LT GE. +%right ESCAPE. +%left BITAND BITOR LSHIFT RSHIFT. +%left PLUS MINUS. +%left STAR SLASH REM. +%left CONCAT. +%left COLLATE. +%right UMINUS UPLUS BITNOT. + +// And "ids" is an identifer-or-string. +// +%type ids {Token} +ids(A) ::= ID|STRING(X). {A = X;} + +// The name of a column or table can be any of the following: +// +%type nm {Token} +nm(A) ::= ID(X). {A = X;} +nm(A) ::= STRING(X). {A = X;} +nm(A) ::= JOIN_KW(X). {A = X;} + +// A typetoken is really one or more tokens that form a type name such +// as can be found after the column name in a CREATE TABLE statement. +// Multiple tokens are concatenated to form the value of the typetoken. +// +%type typetoken {Token} +type ::= . +type ::= typetoken(X). {sqlite3AddColumnType(pParse,&X);} +typetoken(A) ::= typename(X). 
{A = X;} +typetoken(A) ::= typename(X) LP signed RP(Y). { + A.z = X.z; + A.n = &Y.z[Y.n] - X.z; +} +typetoken(A) ::= typename(X) LP signed COMMA signed RP(Y). { + A.z = X.z; + A.n = &Y.z[Y.n] - X.z; +} +%type typename {Token} +typename(A) ::= ids(X). {A = X;} +typename(A) ::= typename(X) ids(Y). {A.z=X.z; A.n=Y.n+(Y.z-X.z);} +signed ::= plus_num. +signed ::= minus_num. + +// "carglist" is a list of additional constraints that come after the +// column name and column type in a CREATE TABLE statement. +// +carglist ::= carglist carg. +carglist ::= . +carg ::= CONSTRAINT nm ccons. +carg ::= ccons. +ccons ::= DEFAULT term(X). {sqlite3AddDefaultValue(pParse,X);} +ccons ::= DEFAULT LP expr(X) RP. {sqlite3AddDefaultValue(pParse,X);} +ccons ::= DEFAULT PLUS term(X). {sqlite3AddDefaultValue(pParse,X);} +ccons ::= DEFAULT MINUS term(X). { + Expr *p = sqlite3PExpr(pParse, TK_UMINUS, X, 0, 0); + sqlite3AddDefaultValue(pParse,p); +} +ccons ::= DEFAULT id(X). { + Expr *p = sqlite3PExpr(pParse, TK_STRING, 0, 0, &X); + sqlite3AddDefaultValue(pParse,p); +} + +// In addition to the type name, we also care about the primary key and +// UNIQUE constraints. +// +ccons ::= NULL onconf. +ccons ::= NOT NULL onconf(R). {sqlite3AddNotNull(pParse, R);} +ccons ::= PRIMARY KEY sortorder(Z) onconf(R) autoinc(I). + {sqlite3AddPrimaryKey(pParse,0,R,I,Z);} +ccons ::= UNIQUE onconf(R). {sqlite3CreateIndex(pParse,0,0,0,0,R,0,0,0,0);} +ccons ::= CHECK LP expr(X) RP. {sqlite3AddCheckConstraint(pParse,X);} +ccons ::= REFERENCES nm(T) idxlist_opt(TA) refargs(R). + {sqlite3CreateForeignKey(pParse,0,&T,TA,R);} +ccons ::= defer_subclause(D). {sqlite3DeferForeignKey(pParse,D);} +ccons ::= COLLATE id(C). {sqlite3AddCollateType(pParse, (char*)C.z, C.n);} + +// The optional AUTOINCREMENT keyword +%type autoinc {int} +autoinc(X) ::= . {X = 0;} +autoinc(X) ::= AUTOINCR. {X = 1;} + +// The next group of rules parses the arguments to a REFERENCES clause +// that determine if the referential integrity checking is deferred or +// or immediate and which determine what action to take if a ref-integ +// check fails. +// +%type refargs {int} +refargs(A) ::= . { A = OE_Restrict * 0x010101; } +refargs(A) ::= refargs(X) refarg(Y). { A = (X & Y.mask) | Y.value; } +%type refarg {struct {int value; int mask;}} +refarg(A) ::= MATCH nm. { A.value = 0; A.mask = 0x000000; } +refarg(A) ::= ON DELETE refact(X). { A.value = X; A.mask = 0x0000ff; } +refarg(A) ::= ON UPDATE refact(X). { A.value = X<<8; A.mask = 0x00ff00; } +refarg(A) ::= ON INSERT refact(X). { A.value = X<<16; A.mask = 0xff0000; } +%type refact {int} +refact(A) ::= SET NULL. { A = OE_SetNull; } +refact(A) ::= SET DEFAULT. { A = OE_SetDflt; } +refact(A) ::= CASCADE. { A = OE_Cascade; } +refact(A) ::= RESTRICT. { A = OE_Restrict; } +%type defer_subclause {int} +defer_subclause(A) ::= NOT DEFERRABLE init_deferred_pred_opt(X). {A = X;} +defer_subclause(A) ::= DEFERRABLE init_deferred_pred_opt(X). {A = X;} +%type init_deferred_pred_opt {int} +init_deferred_pred_opt(A) ::= . {A = 0;} +init_deferred_pred_opt(A) ::= INITIALLY DEFERRED. {A = 1;} +init_deferred_pred_opt(A) ::= INITIALLY IMMEDIATE. {A = 0;} + +// For the time being, the only constraint we care about is the primary +// key and UNIQUE. Both create indices. +// +conslist_opt(A) ::= . {A.n = 0; A.z = 0;} +conslist_opt(A) ::= COMMA(X) conslist. {A = X;} +conslist ::= conslist COMMA tcons. +conslist ::= conslist tcons. +conslist ::= tcons. +tcons ::= CONSTRAINT nm. +tcons ::= PRIMARY KEY LP idxlist(X) autoinc(I) RP onconf(R). 
+ {sqlite3AddPrimaryKey(pParse,X,R,I,0);} +tcons ::= UNIQUE LP idxlist(X) RP onconf(R). + {sqlite3CreateIndex(pParse,0,0,0,X,R,0,0,0,0);} +tcons ::= CHECK LP expr(E) RP onconf. {sqlite3AddCheckConstraint(pParse,E);} +tcons ::= FOREIGN KEY LP idxlist(FA) RP + REFERENCES nm(T) idxlist_opt(TA) refargs(R) defer_subclause_opt(D). { + sqlite3CreateForeignKey(pParse, FA, &T, TA, R); + sqlite3DeferForeignKey(pParse, D); +} +%type defer_subclause_opt {int} +defer_subclause_opt(A) ::= . {A = 0;} +defer_subclause_opt(A) ::= defer_subclause(X). {A = X;} + +// The following is a non-standard extension that allows us to declare the +// default behavior when there is a constraint conflict. +// +%type onconf {int} +%type orconf {int} +%type resolvetype {int} +onconf(A) ::= . {A = OE_Default;} +onconf(A) ::= ON CONFLICT resolvetype(X). {A = X;} +orconf(A) ::= . {A = OE_Default;} +orconf(A) ::= OR resolvetype(X). {A = X;} +resolvetype(A) ::= raisetype(X). {A = X;} +resolvetype(A) ::= IGNORE. {A = OE_Ignore;} +resolvetype(A) ::= REPLACE. {A = OE_Replace;} + +////////////////////////// The DROP TABLE ///////////////////////////////////// +// +cmd ::= DROP TABLE ifexists(E) fullname(X). { + sqlite3DropTable(pParse, X, 0, E); +} +%type ifexists {int} +ifexists(A) ::= IF EXISTS. {A = 1;} +ifexists(A) ::= . {A = 0;} + +///////////////////// The CREATE VIEW statement ///////////////////////////// +// +%ifndef SQLITE_OMIT_VIEW +cmd ::= CREATE(X) temp(T) VIEW ifnotexists(E) nm(Y) dbnm(Z) AS select(S). { + sqlite3CreateView(pParse, &X, &Y, &Z, S, T, E); +} +cmd ::= DROP VIEW ifexists(E) fullname(X). { + sqlite3DropTable(pParse, X, 1, E); +} +%endif SQLITE_OMIT_VIEW + +//////////////////////// The SELECT statement ///////////////////////////////// +// +cmd ::= select(X). { + sqlite3Select(pParse, X, SRT_Callback, 0, 0, 0, 0, 0); + sqlite3SelectDelete(X); +} + +%type select {Select*} +%destructor select {sqlite3SelectDelete($$);} +%type oneselect {Select*} +%destructor oneselect {sqlite3SelectDelete($$);} + +select(A) ::= oneselect(X). {A = X;} +%ifndef SQLITE_OMIT_COMPOUND_SELECT +select(A) ::= select(X) multiselect_op(Y) oneselect(Z). { + if( Z ){ + Z->op = Y; + Z->pPrior = X; + }else{ + sqlite3SelectDelete(X); + } + A = Z; +} +%type multiselect_op {int} +multiselect_op(A) ::= UNION(OP). {A = @OP;} +multiselect_op(A) ::= UNION ALL. {A = TK_ALL;} +multiselect_op(A) ::= EXCEPT|INTERSECT(OP). {A = @OP;} +%endif SQLITE_OMIT_COMPOUND_SELECT +oneselect(A) ::= SELECT distinct(D) selcollist(W) from(X) where_opt(Y) + groupby_opt(P) having_opt(Q) orderby_opt(Z) limit_opt(L). { + A = sqlite3SelectNew(pParse,W,X,Y,P,Q,Z,D,L.pLimit,L.pOffset); +} + +// The "distinct" nonterminal is true (1) if the DISTINCT keyword is +// present and false (0) if it is not. +// +%type distinct {int} +distinct(A) ::= DISTINCT. {A = 1;} +distinct(A) ::= ALL. {A = 0;} +distinct(A) ::= . {A = 0;} + +// selcollist is a list of expressions that are to become the return +// values of the SELECT statement. The "*" in statements like +// "SELECT * FROM ..." is encoded as a special expression with an +// opcode of TK_ALL. +// +%type selcollist {ExprList*} +%destructor selcollist {sqlite3ExprListDelete($$);} +%type sclp {ExprList*} +%destructor sclp {sqlite3ExprListDelete($$);} +sclp(A) ::= selcollist(X) COMMA. {A = X;} +sclp(A) ::= . {A = 0;} +selcollist(A) ::= sclp(P) expr(X) as(Y). { + A = sqlite3ExprListAppend(pParse,P,X,Y.n?&Y:0); +} +selcollist(A) ::= sclp(P) STAR. 
{ + Expr *p = sqlite3PExpr(pParse, TK_ALL, 0, 0, 0); + A = sqlite3ExprListAppend(pParse, P, p, 0); +} +selcollist(A) ::= sclp(P) nm(X) DOT STAR. { + Expr *pRight = sqlite3PExpr(pParse, TK_ALL, 0, 0, 0); + Expr *pLeft = sqlite3PExpr(pParse, TK_ID, 0, 0, &X); + Expr *pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight, 0); + A = sqlite3ExprListAppend(pParse,P, pDot, 0); +} + +// An option "AS " phrase that can follow one of the expressions that +// define the result set, or one of the tables in the FROM clause. +// +%type as {Token} +as(X) ::= AS nm(Y). {X = Y;} +as(X) ::= ids(Y). {X = Y;} +as(X) ::= . {X.n = 0;} + + +%type seltablist {SrcList*} +%destructor seltablist {sqlite3SrcListDelete($$);} +%type stl_prefix {SrcList*} +%destructor stl_prefix {sqlite3SrcListDelete($$);} +%type from {SrcList*} +%destructor from {sqlite3SrcListDelete($$);} + +// A complete FROM clause. +// +from(A) ::= . {A = sqlite3DbMallocZero(pParse->db, sizeof(*A));} +from(A) ::= FROM seltablist(X). { + A = X; + sqlite3SrcListShiftJoinType(A); +} + +// "seltablist" is a "Select Table List" - the content of the FROM clause +// in a SELECT statement. "stl_prefix" is a prefix of this list. +// +stl_prefix(A) ::= seltablist(X) joinop(Y). { + A = X; + if( A && A->nSrc>0 ) A->a[A->nSrc-1].jointype = Y; +} +stl_prefix(A) ::= . {A = 0;} +seltablist(A) ::= stl_prefix(X) nm(Y) dbnm(D) as(Z) on_opt(N) using_opt(U). { + A = sqlite3SrcListAppendFromTerm(pParse,X,&Y,&D,&Z,0,N,U); +} +%ifndef SQLITE_OMIT_SUBQUERY + seltablist(A) ::= stl_prefix(X) LP seltablist_paren(S) RP + as(Z) on_opt(N) using_opt(U). { + A = sqlite3SrcListAppendFromTerm(pParse,X,0,0,&Z,S,N,U); + } + + // A seltablist_paren nonterminal represents anything in a FROM that + // is contained inside parentheses. This can be either a subquery or + // a grouping of table and subqueries. + // + %type seltablist_paren {Select*} + %destructor seltablist_paren {sqlite3SelectDelete($$);} + seltablist_paren(A) ::= select(S). {A = S;} + seltablist_paren(A) ::= seltablist(F). { + sqlite3SrcListShiftJoinType(F); + A = sqlite3SelectNew(pParse,0,F,0,0,0,0,0,0,0); + } +%endif SQLITE_OMIT_SUBQUERY + +%type dbnm {Token} +dbnm(A) ::= . {A.z=0; A.n=0;} +dbnm(A) ::= DOT nm(X). {A = X;} + +%type fullname {SrcList*} +%destructor fullname {sqlite3SrcListDelete($$);} +fullname(A) ::= nm(X) dbnm(Y). {A = sqlite3SrcListAppend(pParse->db,0,&X,&Y);} + +%type joinop {int} +%type joinop2 {int} +joinop(X) ::= COMMA|JOIN. { X = JT_INNER; } +joinop(X) ::= JOIN_KW(A) JOIN. { X = sqlite3JoinType(pParse,&A,0,0); } +joinop(X) ::= JOIN_KW(A) nm(B) JOIN. { X = sqlite3JoinType(pParse,&A,&B,0); } +joinop(X) ::= JOIN_KW(A) nm(B) nm(C) JOIN. + { X = sqlite3JoinType(pParse,&A,&B,&C); } + +%type on_opt {Expr*} +%destructor on_opt {sqlite3ExprDelete($$);} +on_opt(N) ::= ON expr(E). {N = E;} +on_opt(N) ::= . {N = 0;} + +%type using_opt {IdList*} +%destructor using_opt {sqlite3IdListDelete($$);} +using_opt(U) ::= USING LP inscollist(L) RP. {U = L;} +using_opt(U) ::= . {U = 0;} + + +%type orderby_opt {ExprList*} +%destructor orderby_opt {sqlite3ExprListDelete($$);} +%type sortlist {ExprList*} +%destructor sortlist {sqlite3ExprListDelete($$);} +%type sortitem {Expr*} +%destructor sortitem {sqlite3ExprDelete($$);} + +orderby_opt(A) ::= . {A = 0;} +orderby_opt(A) ::= ORDER BY sortlist(X). {A = X;} +sortlist(A) ::= sortlist(X) COMMA sortitem(Y) sortorder(Z). { + A = sqlite3ExprListAppend(pParse,X,Y,0); + if( A ) A->a[A->nExpr-1].sortOrder = Z; +} +sortlist(A) ::= sortitem(Y) sortorder(Z). 
{ + A = sqlite3ExprListAppend(pParse,0,Y,0); + if( A && A->a ) A->a[0].sortOrder = Z; +} +sortitem(A) ::= expr(X). {A = X;} + +%type sortorder {int} + +sortorder(A) ::= ASC. {A = SQLITE_SO_ASC;} +sortorder(A) ::= DESC. {A = SQLITE_SO_DESC;} +sortorder(A) ::= . {A = SQLITE_SO_ASC;} + +%type groupby_opt {ExprList*} +%destructor groupby_opt {sqlite3ExprListDelete($$);} +groupby_opt(A) ::= . {A = 0;} +groupby_opt(A) ::= GROUP BY nexprlist(X). {A = X;} + +%type having_opt {Expr*} +%destructor having_opt {sqlite3ExprDelete($$);} +having_opt(A) ::= . {A = 0;} +having_opt(A) ::= HAVING expr(X). {A = X;} + +%type limit_opt {struct LimitVal} + +// The destructor for limit_opt will never fire in the current grammar. +// The limit_opt non-terminal only occurs at the end of a single production +// rule for SELECT statements. As soon as the rule that create the +// limit_opt non-terminal reduces, the SELECT statement rule will also +// reduce. So there is never a limit_opt non-terminal on the stack +// except as a transient. So there is never anything to destroy. +// +//%destructor limit_opt { +// sqlite3ExprDelete($$.pLimit); +// sqlite3ExprDelete($$.pOffset); +//} +limit_opt(A) ::= . {A.pLimit = 0; A.pOffset = 0;} +limit_opt(A) ::= LIMIT expr(X). {A.pLimit = X; A.pOffset = 0;} +limit_opt(A) ::= LIMIT expr(X) OFFSET expr(Y). + {A.pLimit = X; A.pOffset = Y;} +limit_opt(A) ::= LIMIT expr(X) COMMA expr(Y). + {A.pOffset = X; A.pLimit = Y;} + +/////////////////////////// The DELETE statement ///////////////////////////// +// +cmd ::= DELETE FROM fullname(X) where_opt(Y). {sqlite3DeleteFrom(pParse,X,Y);} + +%type where_opt {Expr*} +%destructor where_opt {sqlite3ExprDelete($$);} + +where_opt(A) ::= . {A = 0;} +where_opt(A) ::= WHERE expr(X). {A = X;} + +////////////////////////// The UPDATE command //////////////////////////////// +// +cmd ::= UPDATE orconf(R) fullname(X) SET setlist(Y) where_opt(Z). { + sqlite3ExprListCheckLength(pParse,Y,SQLITE_MAX_COLUMN,"set list"); + sqlite3Update(pParse,X,Y,Z,R); +} + +%type setlist {ExprList*} +%destructor setlist {sqlite3ExprListDelete($$);} + +setlist(A) ::= setlist(Z) COMMA nm(X) EQ expr(Y). + {A = sqlite3ExprListAppend(pParse,Z,Y,&X);} +setlist(A) ::= nm(X) EQ expr(Y). + {A = sqlite3ExprListAppend(pParse,0,Y,&X);} + +////////////////////////// The INSERT command ///////////////////////////////// +// +cmd ::= insert_cmd(R) INTO fullname(X) inscollist_opt(F) + VALUES LP itemlist(Y) RP. + {sqlite3Insert(pParse, X, Y, 0, F, R);} +cmd ::= insert_cmd(R) INTO fullname(X) inscollist_opt(F) select(S). + {sqlite3Insert(pParse, X, 0, S, F, R);} +cmd ::= insert_cmd(R) INTO fullname(X) inscollist_opt(F) DEFAULT VALUES. + {sqlite3Insert(pParse, X, 0, 0, F, R);} + +%type insert_cmd {int} +insert_cmd(A) ::= INSERT orconf(R). {A = R;} +insert_cmd(A) ::= REPLACE. {A = OE_Replace;} + + +%type itemlist {ExprList*} +%destructor itemlist {sqlite3ExprListDelete($$);} + +itemlist(A) ::= itemlist(X) COMMA expr(Y). + {A = sqlite3ExprListAppend(pParse,X,Y,0);} +itemlist(A) ::= expr(X). + {A = sqlite3ExprListAppend(pParse,0,X,0);} + +%type inscollist_opt {IdList*} +%destructor inscollist_opt {sqlite3IdListDelete($$);} +%type inscollist {IdList*} +%destructor inscollist {sqlite3IdListDelete($$);} + +inscollist_opt(A) ::= . {A = 0;} +inscollist_opt(A) ::= LP inscollist(X) RP. {A = X;} +inscollist(A) ::= inscollist(X) COMMA nm(Y). + {A = sqlite3IdListAppend(pParse->db,X,&Y);} +inscollist(A) ::= nm(Y). 
+ {A = sqlite3IdListAppend(pParse->db,0,&Y);} + +/////////////////////////// Expression Processing ///////////////////////////// +// + +%type expr {Expr*} +%destructor expr {sqlite3ExprDelete($$);} +%type term {Expr*} +%destructor term {sqlite3ExprDelete($$);} + +expr(A) ::= term(X). {A = X;} +expr(A) ::= LP(B) expr(X) RP(E). {A = X; sqlite3ExprSpan(A,&B,&E); } +term(A) ::= NULL(X). {A = sqlite3PExpr(pParse, @X, 0, 0, &X);} +expr(A) ::= ID(X). {A = sqlite3PExpr(pParse, TK_ID, 0, 0, &X);} +expr(A) ::= JOIN_KW(X). {A = sqlite3PExpr(pParse, TK_ID, 0, 0, &X);} +expr(A) ::= nm(X) DOT nm(Y). { + Expr *temp1 = sqlite3PExpr(pParse, TK_ID, 0, 0, &X); + Expr *temp2 = sqlite3PExpr(pParse, TK_ID, 0, 0, &Y); + A = sqlite3PExpr(pParse, TK_DOT, temp1, temp2, 0); +} +expr(A) ::= nm(X) DOT nm(Y) DOT nm(Z). { + Expr *temp1 = sqlite3PExpr(pParse, TK_ID, 0, 0, &X); + Expr *temp2 = sqlite3PExpr(pParse, TK_ID, 0, 0, &Y); + Expr *temp3 = sqlite3PExpr(pParse, TK_ID, 0, 0, &Z); + Expr *temp4 = sqlite3PExpr(pParse, TK_DOT, temp2, temp3, 0); + A = sqlite3PExpr(pParse, TK_DOT, temp1, temp4, 0); +} +term(A) ::= INTEGER|FLOAT|BLOB(X). {A = sqlite3PExpr(pParse, @X, 0, 0, &X);} +term(A) ::= STRING(X). {A = sqlite3PExpr(pParse, @X, 0, 0, &X);} +expr(A) ::= REGISTER(X). {A = sqlite3RegisterExpr(pParse, &X);} +expr(A) ::= VARIABLE(X). { + Token *pToken = &X; + Expr *pExpr = A = sqlite3PExpr(pParse, TK_VARIABLE, 0, 0, pToken); + sqlite3ExprAssignVarNumber(pParse, pExpr); +} +expr(A) ::= expr(E) COLLATE id(C). { + A = sqlite3ExprSetColl(pParse, E, &C); +} +%ifndef SQLITE_OMIT_CAST +expr(A) ::= CAST(X) LP expr(E) AS typetoken(T) RP(Y). { + A = sqlite3PExpr(pParse, TK_CAST, E, 0, &T); + sqlite3ExprSpan(A,&X,&Y); +} +%endif SQLITE_OMIT_CAST +expr(A) ::= ID(X) LP distinct(D) exprlist(Y) RP(E). { + if( Y && Y->nExpr>SQLITE_MAX_FUNCTION_ARG ){ + sqlite3ErrorMsg(pParse, "too many arguments on function %T", &X); + } + A = sqlite3ExprFunction(pParse, Y, &X); + sqlite3ExprSpan(A,&X,&E); + if( D && A ){ + A->flags |= EP_Distinct; + } +} +expr(A) ::= ID(X) LP STAR RP(E). { + A = sqlite3ExprFunction(pParse, 0, &X); + sqlite3ExprSpan(A,&X,&E); +} +term(A) ::= CTIME_KW(OP). { + /* The CURRENT_TIME, CURRENT_DATE, and CURRENT_TIMESTAMP values are + ** treated as functions that return constants */ + A = sqlite3ExprFunction(pParse, 0,&OP); + if( A ){ + A->op = TK_CONST_FUNC; + A->span = OP; + } +} +expr(A) ::= expr(X) AND(OP) expr(Y). {A = sqlite3PExpr(pParse,@OP,X,Y,0);} +expr(A) ::= expr(X) OR(OP) expr(Y). {A = sqlite3PExpr(pParse,@OP,X,Y,0);} +expr(A) ::= expr(X) LT|GT|GE|LE(OP) expr(Y). + {A = sqlite3PExpr(pParse,@OP,X,Y,0);} +expr(A) ::= expr(X) EQ|NE(OP) expr(Y). {A = sqlite3PExpr(pParse,@OP,X,Y,0);} +expr(A) ::= expr(X) BITAND|BITOR|LSHIFT|RSHIFT(OP) expr(Y). + {A = sqlite3PExpr(pParse,@OP,X,Y,0);} +expr(A) ::= expr(X) PLUS|MINUS(OP) expr(Y).{A = sqlite3PExpr(pParse,@OP,X,Y,0);} +expr(A) ::= expr(X) STAR|SLASH|REM(OP) expr(Y). + {A = sqlite3PExpr(pParse,@OP,X,Y,0);} +expr(A) ::= expr(X) CONCAT(OP) expr(Y). {A = sqlite3PExpr(pParse,@OP,X,Y,0);} +%type likeop {struct LikeOp} +likeop(A) ::= LIKE_KW(X). {A.eOperator = X; A.not = 0;} +likeop(A) ::= NOT LIKE_KW(X). {A.eOperator = X; A.not = 1;} +likeop(A) ::= MATCH(X). {A.eOperator = X; A.not = 0;} +likeop(A) ::= NOT MATCH(X). {A.eOperator = X; A.not = 1;} +%type escape {Expr*} +%destructor escape {sqlite3ExprDelete($$);} +escape(X) ::= ESCAPE expr(A). [ESCAPE] {X = A;} +escape(X) ::= . [ESCAPE] {X = 0;} +expr(A) ::= expr(X) likeop(OP) expr(Y) escape(E). 
[LIKE_KW] { + ExprList *pList; + pList = sqlite3ExprListAppend(pParse,0, Y, 0); + pList = sqlite3ExprListAppend(pParse,pList, X, 0); + if( E ){ + pList = sqlite3ExprListAppend(pParse,pList, E, 0); + } + A = sqlite3ExprFunction(pParse, pList, &OP.eOperator); + if( OP.not ) A = sqlite3PExpr(pParse, TK_NOT, A, 0, 0); + sqlite3ExprSpan(A, &X->span, &Y->span); + if( A ) A->flags |= EP_InfixFunc; +} + +expr(A) ::= expr(X) ISNULL|NOTNULL(E). { + A = sqlite3PExpr(pParse, @E, X, 0, 0); + sqlite3ExprSpan(A,&X->span,&E); +} +expr(A) ::= expr(X) IS NULL(E). { + A = sqlite3PExpr(pParse, TK_ISNULL, X, 0, 0); + sqlite3ExprSpan(A,&X->span,&E); +} +expr(A) ::= expr(X) NOT NULL(E). { + A = sqlite3PExpr(pParse, TK_NOTNULL, X, 0, 0); + sqlite3ExprSpan(A,&X->span,&E); +} +expr(A) ::= expr(X) IS NOT NULL(E). { + A = sqlite3PExpr(pParse, TK_NOTNULL, X, 0, 0); + sqlite3ExprSpan(A,&X->span,&E); +} +expr(A) ::= NOT|BITNOT(B) expr(X). { + A = sqlite3PExpr(pParse, @B, X, 0, 0); + sqlite3ExprSpan(A,&B,&X->span); +} +expr(A) ::= MINUS(B) expr(X). [UMINUS] { + A = sqlite3PExpr(pParse, TK_UMINUS, X, 0, 0); + sqlite3ExprSpan(A,&B,&X->span); +} +expr(A) ::= PLUS(B) expr(X). [UPLUS] { + A = sqlite3PExpr(pParse, TK_UPLUS, X, 0, 0); + sqlite3ExprSpan(A,&B,&X->span); +} +%type between_op {int} +between_op(A) ::= BETWEEN. {A = 0;} +between_op(A) ::= NOT BETWEEN. {A = 1;} +expr(A) ::= expr(W) between_op(N) expr(X) AND expr(Y). [BETWEEN] { + ExprList *pList = sqlite3ExprListAppend(pParse,0, X, 0); + pList = sqlite3ExprListAppend(pParse,pList, Y, 0); + A = sqlite3PExpr(pParse, TK_BETWEEN, W, 0, 0); + if( A ){ + A->pList = pList; + }else{ + sqlite3ExprListDelete(pList); + } + if( N ) A = sqlite3PExpr(pParse, TK_NOT, A, 0, 0); + sqlite3ExprSpan(A,&W->span,&Y->span); +} +%ifndef SQLITE_OMIT_SUBQUERY + %type in_op {int} + in_op(A) ::= IN. {A = 0;} + in_op(A) ::= NOT IN. {A = 1;} + expr(A) ::= expr(X) in_op(N) LP exprlist(Y) RP(E). [IN] { + A = sqlite3PExpr(pParse, TK_IN, X, 0, 0); + if( A ){ + A->pList = Y; + sqlite3ExprSetHeight(A); + }else{ + sqlite3ExprListDelete(Y); + } + if( N ) A = sqlite3PExpr(pParse, TK_NOT, A, 0, 0); + sqlite3ExprSpan(A,&X->span,&E); + } + expr(A) ::= LP(B) select(X) RP(E). { + A = sqlite3PExpr(pParse, TK_SELECT, 0, 0, 0); + if( A ){ + A->pSelect = X; + sqlite3ExprSetHeight(A); + }else{ + sqlite3SelectDelete(X); + } + sqlite3ExprSpan(A,&B,&E); + } + expr(A) ::= expr(X) in_op(N) LP select(Y) RP(E). [IN] { + A = sqlite3PExpr(pParse, TK_IN, X, 0, 0); + if( A ){ + A->pSelect = Y; + sqlite3ExprSetHeight(A); + }else{ + sqlite3SelectDelete(Y); + } + if( N ) A = sqlite3PExpr(pParse, TK_NOT, A, 0, 0); + sqlite3ExprSpan(A,&X->span,&E); + } + expr(A) ::= expr(X) in_op(N) nm(Y) dbnm(Z). [IN] { + SrcList *pSrc = sqlite3SrcListAppend(pParse->db, 0,&Y,&Z); + A = sqlite3PExpr(pParse, TK_IN, X, 0, 0); + if( A ){ + A->pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0,0); + sqlite3ExprSetHeight(A); + }else{ + sqlite3SrcListDelete(pSrc); + } + if( N ) A = sqlite3PExpr(pParse, TK_NOT, A, 0, 0); + sqlite3ExprSpan(A,&X->span,Z.z?&Z:&Y); + } + expr(A) ::= EXISTS(B) LP select(Y) RP(E). { + Expr *p = A = sqlite3PExpr(pParse, TK_EXISTS, 0, 0, 0); + if( p ){ + p->pSelect = Y; + sqlite3ExprSpan(p,&B,&E); + sqlite3ExprSetHeight(A); + }else{ + sqlite3SelectDelete(Y); + } + } +%endif SQLITE_OMIT_SUBQUERY + +/* CASE expressions */ +expr(A) ::= CASE(C) case_operand(X) case_exprlist(Y) case_else(Z) END(E). 
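Editor's note: one detail worth calling out from the likeop and escape rules earlier in this section is that "x LIKE y [ESCAPE e]" is not a dedicated operator; the action builds an ExprList with the pattern first and rewrites the expression as a call to the function named by the keyword (like, glob, match). A practical consequence is that an application can replace LIKE by registering its own two-argument like() function. The sketch below is a deliberately crude illustration (exact, case-sensitive comparison only); it is not how SQLite's built-in like() behaves:

#include <stdio.h>
#include <string.h>
#include <sqlite3.h>

/* Toy replacement: like(pattern, string) matches only on exact, case-sensitive
** equality.  Purely to show that the LIKE operator routes through a
** user-overridable function with the pattern as the first argument. */
static void toy_like(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  const unsigned char *zPat = sqlite3_value_text(argv[0]);
  const unsigned char *zStr = sqlite3_value_text(argv[1]);
  (void)argc;
  if( zPat==0 || zStr==0 ){
    sqlite3_result_null(ctx);
  }else{
    sqlite3_result_int(ctx, strcmp((const char*)zPat, (const char*)zStr)==0);
  }
}

static int print_cb(void *arg, int nCol, char **azVal, char **azCol){
  (void)arg; (void)azCol;
  printf("%s\n", nCol>0 && azVal[0] ? azVal[0] : "NULL");
  return 0;
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;

  /* Built-in LIKE is case-insensitive for ASCII: prints 1. */
  sqlite3_exec(db, "SELECT 'abc' LIKE 'ABC';", print_cb, 0, 0);

  /* Override the two-argument like() and the operator follows suit: prints 0. */
  sqlite3_create_function(db, "like", 2, SQLITE_UTF8, 0, toy_like, 0, 0);
  sqlite3_exec(db, "SELECT 'abc' LIKE 'ABC';", print_cb, 0, 0);

  sqlite3_close(db);
  return 0;
}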
{ + A = sqlite3PExpr(pParse, TK_CASE, X, Z, 0); + if( A ){ + A->pList = Y; + sqlite3ExprSetHeight(A); + }else{ + sqlite3ExprListDelete(Y); + } + sqlite3ExprSpan(A, &C, &E); +} +%type case_exprlist {ExprList*} +%destructor case_exprlist {sqlite3ExprListDelete($$);} +case_exprlist(A) ::= case_exprlist(X) WHEN expr(Y) THEN expr(Z). { + A = sqlite3ExprListAppend(pParse,X, Y, 0); + A = sqlite3ExprListAppend(pParse,A, Z, 0); +} +case_exprlist(A) ::= WHEN expr(Y) THEN expr(Z). { + A = sqlite3ExprListAppend(pParse,0, Y, 0); + A = sqlite3ExprListAppend(pParse,A, Z, 0); +} +%type case_else {Expr*} +%destructor case_else {sqlite3ExprDelete($$);} +case_else(A) ::= ELSE expr(X). {A = X;} +case_else(A) ::= . {A = 0;} +%type case_operand {Expr*} +%destructor case_operand {sqlite3ExprDelete($$);} +case_operand(A) ::= expr(X). {A = X;} +case_operand(A) ::= . {A = 0;} + +%type exprlist {ExprList*} +%destructor exprlist {sqlite3ExprListDelete($$);} +%type nexprlist {ExprList*} +%destructor nexprlist {sqlite3ExprListDelete($$);} + +exprlist(A) ::= nexprlist(X). {A = X;} +exprlist(A) ::= . {A = 0;} +nexprlist(A) ::= nexprlist(X) COMMA expr(Y). + {A = sqlite3ExprListAppend(pParse,X,Y,0);} +nexprlist(A) ::= expr(Y). + {A = sqlite3ExprListAppend(pParse,0,Y,0);} + + +///////////////////////////// The CREATE INDEX command /////////////////////// +// +cmd ::= CREATE(S) uniqueflag(U) INDEX ifnotexists(NE) nm(X) dbnm(D) + ON nm(Y) LP idxlist(Z) RP(E). { + sqlite3CreateIndex(pParse, &X, &D, + sqlite3SrcListAppend(pParse->db,0,&Y,0), Z, U, + &S, &E, SQLITE_SO_ASC, NE); +} + +%type uniqueflag {int} +uniqueflag(A) ::= UNIQUE. {A = OE_Abort;} +uniqueflag(A) ::= . {A = OE_None;} + +%type idxlist {ExprList*} +%destructor idxlist {sqlite3ExprListDelete($$);} +%type idxlist_opt {ExprList*} +%destructor idxlist_opt {sqlite3ExprListDelete($$);} +%type idxitem {Token} + +idxlist_opt(A) ::= . {A = 0;} +idxlist_opt(A) ::= LP idxlist(X) RP. {A = X;} +idxlist(A) ::= idxlist(X) COMMA idxitem(Y) collate(C) sortorder(Z). { + Expr *p = 0; + if( C.n>0 ){ + p = sqlite3PExpr(pParse, TK_COLUMN, 0, 0, 0); + if( p ) p->pColl = sqlite3LocateCollSeq(pParse, (char*)C.z, C.n); + } + A = sqlite3ExprListAppend(pParse,X, p, &Y); + sqlite3ExprListCheckLength(pParse, A, SQLITE_MAX_COLUMN, "index"); + if( A ) A->a[A->nExpr-1].sortOrder = Z; +} +idxlist(A) ::= idxitem(Y) collate(C) sortorder(Z). { + Expr *p = 0; + if( C.n>0 ){ + p = sqlite3PExpr(pParse, TK_COLUMN, 0, 0, 0); + if( p ) p->pColl = sqlite3LocateCollSeq(pParse, (char*)C.z, C.n); + } + A = sqlite3ExprListAppend(pParse,0, p, &Y); + sqlite3ExprListCheckLength(pParse, A, SQLITE_MAX_COLUMN, "index"); + if( A ) A->a[A->nExpr-1].sortOrder = Z; +} +idxitem(A) ::= nm(X). {A = X;} + +%type collate {Token} +collate(C) ::= . {C.z = 0; C.n = 0;} +collate(C) ::= COLLATE id(X). {C = X;} + + +///////////////////////////// The DROP INDEX command ///////////////////////// +// +cmd ::= DROP INDEX ifexists(E) fullname(X). {sqlite3DropIndex(pParse, X, E);} + +///////////////////////////// The VACUUM command ///////////////////////////// +// +%ifndef SQLITE_OMIT_VACUUM +%ifndef SQLITE_OMIT_ATTACH +cmd ::= VACUUM. {sqlite3Vacuum(pParse);} +cmd ::= VACUUM nm. {sqlite3Vacuum(pParse);} +%endif SQLITE_OMIT_ATTACH +%endif SQLITE_OMIT_VACUUM + +///////////////////////////// The PRAGMA command ///////////////////////////// +// +%ifndef SQLITE_OMIT_PRAGMA +cmd ::= PRAGMA nm(X) dbnm(Z) EQ nmnum(Y). {sqlite3Pragma(pParse,&X,&Z,&Y,0);} +cmd ::= PRAGMA nm(X) dbnm(Z) EQ ON(Y). 
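Editor's note: the idxlist rules above let each indexed column carry its own COLLATE clause and sort order, and uniqueflag maps UNIQUE onto the OE_Abort conflict strategy. A small sketch of what that buys at the SQL level (illustrative only; the table and index names are invented): a UNIQUE index declared with COLLATE NOCASE rejects values that differ only in case.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  int rc;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;

  sqlite3_exec(db, "CREATE TABLE people(name TEXT);", 0, 0, 0);
  /* Exercises uniqueflag (-> OE_Abort) and idxitem + collate + sortorder. */
  sqlite3_exec(db, "CREATE UNIQUE INDEX idx_name ON people(name COLLATE NOCASE DESC);", 0, 0, 0);

  sqlite3_exec(db, "INSERT INTO people VALUES('Bob');", 0, 0, 0);
  rc = sqlite3_exec(db, "INSERT INTO people VALUES('bob');", 0, 0, 0);

  /* The second insert collides under NOCASE, so the abort strategy fires. */
  printf("second insert: %s\n", rc==SQLITE_CONSTRAINT ? "constraint violation" : "ok");

  sqlite3_close(db);
  return 0;
}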
{sqlite3Pragma(pParse,&X,&Z,&Y,0);} +cmd ::= PRAGMA nm(X) dbnm(Z) EQ minus_num(Y). { + sqlite3Pragma(pParse,&X,&Z,&Y,1); +} +cmd ::= PRAGMA nm(X) dbnm(Z) LP nmnum(Y) RP. {sqlite3Pragma(pParse,&X,&Z,&Y,0);} +cmd ::= PRAGMA nm(X) dbnm(Z). {sqlite3Pragma(pParse,&X,&Z,0,0);} +nmnum(A) ::= plus_num(X). {A = X;} +nmnum(A) ::= nm(X). {A = X;} +%endif SQLITE_OMIT_PRAGMA +plus_num(A) ::= plus_opt number(X). {A = X;} +minus_num(A) ::= MINUS number(X). {A = X;} +number(A) ::= INTEGER|FLOAT(X). {A = X;} +plus_opt ::= PLUS. +plus_opt ::= . + +//////////////////////////// The CREATE TRIGGER command ///////////////////// + +%ifndef SQLITE_OMIT_TRIGGER + +cmd ::= CREATE trigger_decl(A) BEGIN trigger_cmd_list(S) END(Z). { + Token all; + all.z = A.z; + all.n = (Z.z - A.z) + Z.n; + sqlite3FinishTrigger(pParse, S, &all); +} + +trigger_decl(A) ::= temp(T) TRIGGER ifnotexists(NOERR) nm(B) dbnm(Z) + trigger_time(C) trigger_event(D) + ON fullname(E) foreach_clause when_clause(G). { + sqlite3BeginTrigger(pParse, &B, &Z, C, D.a, D.b, E, G, T, NOERR); + A = (Z.n==0?B:Z); +} + +%type trigger_time {int} +trigger_time(A) ::= BEFORE. { A = TK_BEFORE; } +trigger_time(A) ::= AFTER. { A = TK_AFTER; } +trigger_time(A) ::= INSTEAD OF. { A = TK_INSTEAD;} +trigger_time(A) ::= . { A = TK_BEFORE; } + +%type trigger_event {struct TrigEvent} +%destructor trigger_event {sqlite3IdListDelete($$.b);} +trigger_event(A) ::= DELETE|INSERT(OP). {A.a = @OP; A.b = 0;} +trigger_event(A) ::= UPDATE(OP). {A.a = @OP; A.b = 0;} +trigger_event(A) ::= UPDATE OF inscollist(X). {A.a = TK_UPDATE; A.b = X;} + +foreach_clause ::= . +foreach_clause ::= FOR EACH ROW. + +%type when_clause {Expr*} +%destructor when_clause {sqlite3ExprDelete($$);} +when_clause(A) ::= . { A = 0; } +when_clause(A) ::= WHEN expr(X). { A = X; } + +%type trigger_cmd_list {TriggerStep*} +%destructor trigger_cmd_list {sqlite3DeleteTriggerStep($$);} +trigger_cmd_list(A) ::= trigger_cmd_list(Y) trigger_cmd(X) SEMI. { + if( Y ){ + Y->pLast->pNext = X; + }else{ + Y = X; + } + Y->pLast = X; + A = Y; +} +trigger_cmd_list(A) ::= . { A = 0; } + +%type trigger_cmd {TriggerStep*} +%destructor trigger_cmd {sqlite3DeleteTriggerStep($$);} +// UPDATE +trigger_cmd(A) ::= UPDATE orconf(R) nm(X) SET setlist(Y) where_opt(Z). + { A = sqlite3TriggerUpdateStep(pParse->db, &X, Y, Z, R); } + +// INSERT +trigger_cmd(A) ::= insert_cmd(R) INTO nm(X) inscollist_opt(F) + VALUES LP itemlist(Y) RP. + {A = sqlite3TriggerInsertStep(pParse->db, &X, F, Y, 0, R);} + +trigger_cmd(A) ::= insert_cmd(R) INTO nm(X) inscollist_opt(F) select(S). + {A = sqlite3TriggerInsertStep(pParse->db, &X, F, 0, S, R);} + +// DELETE +trigger_cmd(A) ::= DELETE FROM nm(X) where_opt(Y). + {A = sqlite3TriggerDeleteStep(pParse->db, &X, Y);} + +// SELECT +trigger_cmd(A) ::= select(X). {A = sqlite3TriggerSelectStep(pParse->db, X); } + +// The special RAISE expression that may occur in trigger programs +expr(A) ::= RAISE(X) LP IGNORE RP(Y). { + A = sqlite3PExpr(pParse, TK_RAISE, 0, 0, 0); + if( A ){ + A->iColumn = OE_Ignore; + sqlite3ExprSpan(A, &X, &Y); + } +} +expr(A) ::= RAISE(X) LP raisetype(T) COMMA nm(Z) RP(Y). { + A = sqlite3PExpr(pParse, TK_RAISE, 0, 0, &Z); + if( A ) { + A->iColumn = T; + sqlite3ExprSpan(A, &X, &Y); + } +} +%endif !SQLITE_OMIT_TRIGGER + +%type raisetype {int} +raisetype(A) ::= ROLLBACK. {A = OE_Rollback;} +raisetype(A) ::= ABORT. {A = OE_Abort;} +raisetype(A) ::= FAIL. 
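Editor's note: the trigger_cmd_list rule above appends each new TriggerStep in constant time by caching a pLast pointer on the head of the list instead of walking it on every reduction. The fragment below restates that append pattern with a hypothetical Step type; the names are invented for illustration and are not SQLite's own:

#include <stdio.h>

/* Hypothetical stand-in for TriggerStep: a singly linked list whose head
** also caches a pointer to the current tail. */
typedef struct Step Step;
struct Step {
  const char *zOp;   /* what this step does (illustrative label) */
  Step *pNext;       /* next step in program order */
  Step *pLast;       /* meaningful on the head only: the current tail */
};

/* Mirrors the grammar action: append pNew to the (possibly empty) list
** pList and return the head, updating the cached tail pointer. */
static Step *step_append(Step *pList, Step *pNew){
  if( pList ){
    pList->pLast->pNext = pNew;
  }else{
    pList = pNew;
  }
  pList->pLast = pNew;
  return pList;
}

int main(void){
  Step a = {"UPDATE", 0, 0}, b = {"INSERT", 0, 0}, c = {"SELECT", 0, 0};
  Step *p, *head = 0;
  head = step_append(head, &a);
  head = step_append(head, &b);
  head = step_append(head, &c);
  for(p=head; p; p=p->pNext) printf("%s\n", p->zOp);  /* UPDATE INSERT SELECT */
  return 0;
}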
{A = OE_Fail;} + + +//////////////////////// DROP TRIGGER statement ////////////////////////////// +%ifndef SQLITE_OMIT_TRIGGER +cmd ::= DROP TRIGGER ifexists(NOERR) fullname(X). { + sqlite3DropTrigger(pParse,X,NOERR); +} +%endif !SQLITE_OMIT_TRIGGER + +//////////////////////// ATTACH DATABASE file AS name ///////////////////////// +%ifndef SQLITE_OMIT_ATTACH +cmd ::= ATTACH database_kw_opt expr(F) AS expr(D) key_opt(K). { + sqlite3Attach(pParse, F, D, K); +} +cmd ::= DETACH database_kw_opt expr(D). { + sqlite3Detach(pParse, D); +} + +%type key_opt {Expr *} +%destructor key_opt {sqlite3ExprDelete($$);} +key_opt(A) ::= . { A = 0; } +key_opt(A) ::= KEY expr(X). { A = X; } + +database_kw_opt ::= DATABASE. +database_kw_opt ::= . +%endif SQLITE_OMIT_ATTACH + +////////////////////////// REINDEX collation ////////////////////////////////// +%ifndef SQLITE_OMIT_REINDEX +cmd ::= REINDEX. {sqlite3Reindex(pParse, 0, 0);} +cmd ::= REINDEX nm(X) dbnm(Y). {sqlite3Reindex(pParse, &X, &Y);} +%endif SQLITE_OMIT_REINDEX + +/////////////////////////////////// ANALYZE /////////////////////////////////// +%ifndef SQLITE_OMIT_ANALYZE +cmd ::= ANALYZE. {sqlite3Analyze(pParse, 0, 0);} +cmd ::= ANALYZE nm(X) dbnm(Y). {sqlite3Analyze(pParse, &X, &Y);} +%endif + +//////////////////////// ALTER TABLE table ... //////////////////////////////// +%ifndef SQLITE_OMIT_ALTERTABLE +cmd ::= ALTER TABLE fullname(X) RENAME TO nm(Z). { + sqlite3AlterRenameTable(pParse,X,&Z); +} +cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt column(Y). { + sqlite3AlterFinishAddColumn(pParse, &Y); +} +add_column_fullname ::= fullname(X). { + sqlite3AlterBeginAddColumn(pParse, X); +} +kwcolumn_opt ::= . +kwcolumn_opt ::= COLUMNKW. +%endif SQLITE_OMIT_ALTERTABLE + +//////////////////////// CREATE VIRTUAL TABLE ... ///////////////////////////// +%ifndef SQLITE_OMIT_VIRTUALTABLE +cmd ::= create_vtab. {sqlite3VtabFinishParse(pParse,0);} +cmd ::= create_vtab LP vtabarglist RP(X). {sqlite3VtabFinishParse(pParse,&X);} +create_vtab ::= CREATE VIRTUAL TABLE nm(X) dbnm(Y) USING nm(Z). { + sqlite3VtabBeginParse(pParse, &X, &Y, &Z); +} +vtabarglist ::= vtabarg. +vtabarglist ::= vtabarglist COMMA vtabarg. +vtabarg ::= . {sqlite3VtabArgInit(pParse);} +vtabarg ::= vtabarg vtabargtoken. +vtabargtoken ::= ANY(X). {sqlite3VtabArgExtend(pParse,&X);} +vtabargtoken ::= lp anylist RP(X). {sqlite3VtabArgExtend(pParse,&X);} +lp ::= LP(X). {sqlite3VtabArgExtend(pParse,&X);} +anylist ::= . +anylist ::= anylist ANY(X). {sqlite3VtabArgExtend(pParse,&X);} +%endif SQLITE_OMIT_VIRTUALTABLE diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/pragma.c b/libraries/sqlite/unix/sqlite-3.5.1/src/pragma.c new file mode 100644 index 0000000000..b4d9774c1a --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/pragma.c @@ -0,0 +1,1186 @@ +/* +** 2003 April 6 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains code used to implement the PRAGMA command. +** +** $Id: pragma.c,v 1.149 2007/08/31 18:34:59 drh Exp $ +*/ +#include "sqliteInt.h" +#include + +/* Ignore this whole file if pragmas are disabled +*/ +#if !defined(SQLITE_OMIT_PRAGMA) && !defined(SQLITE_OMIT_PARSER) + +/* +** Interpret the given string as a safety level. 
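Editor's note: the ATTACH and DETACH productions above pass both the filename and the database name through as ordinary expressions, which is why forms such as "ATTACH DATABASE ':memory:' AS aux" work. A minimal sketch using only the public API ("aux2" and the table names are just example identifiers) that attaches a second database and copies rows across in one statement:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;

  sqlite3_exec(db, "CREATE TABLE t(x); INSERT INTO t VALUES(42);", 0, 0, 0);

  /* Attach a second, independent in-memory database under the name "aux2". */
  if( sqlite3_exec(db,
        "ATTACH DATABASE ':memory:' AS aux2;"
        "CREATE TABLE aux2.t_copy(x);"
        "INSERT INTO aux2.t_copy SELECT x FROM t;"
        "DETACH DATABASE aux2;", 0, 0, &zErr)!=SQLITE_OK ){
    fprintf(stderr, "error: %s\n", zErr);
    sqlite3_free(zErr);
  }
  sqlite3_close(db);
  return 0;
}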
Return 0 for OFF, +** 1 for ON or NORMAL and 2 for FULL. Return 1 for an empty or +** unrecognized string argument. +** +** Note that the values returned are one less that the values that +** should be passed into sqlite3BtreeSetSafetyLevel(). The is done +** to support legacy SQL code. The safety level used to be boolean +** and older scripts may have used numbers 0 for OFF and 1 for ON. +*/ +static int getSafetyLevel(const char *z){ + /* 123456789 123456789 */ + static const char zText[] = "onoffalseyestruefull"; + static const u8 iOffset[] = {0, 1, 2, 4, 9, 12, 16}; + static const u8 iLength[] = {2, 2, 3, 5, 3, 4, 4}; + static const u8 iValue[] = {1, 0, 0, 0, 1, 1, 2}; + int i, n; + if( isdigit(*z) ){ + return atoi(z); + } + n = strlen(z); + for(i=0; i=0&&i<=2)?i:0); +} +#endif /* ifndef SQLITE_OMIT_AUTOVACUUM */ + +#ifndef SQLITE_OMIT_PAGER_PRAGMAS +/* +** Interpret the given string as a temp db location. Return 1 for file +** backed temporary databases, 2 for the Red-Black tree in memory database +** and 0 to use the compile-time default. +*/ +static int getTempStore(const char *z){ + if( z[0]>='0' && z[0]<='2' ){ + return z[0] - '0'; + }else if( sqlite3StrICmp(z, "file")==0 ){ + return 1; + }else if( sqlite3StrICmp(z, "memory")==0 ){ + return 2; + }else{ + return 0; + } +} +#endif /* SQLITE_PAGER_PRAGMAS */ + +#ifndef SQLITE_OMIT_PAGER_PRAGMAS +/* +** Invalidate temp storage, either when the temp storage is changed +** from default, or when 'file' and the temp_store_directory has changed +*/ +static int invalidateTempStorage(Parse *pParse){ + sqlite3 *db = pParse->db; + if( db->aDb[1].pBt!=0 ){ + if( !db->autoCommit ){ + sqlite3ErrorMsg(pParse, "temporary storage cannot be changed " + "from within a transaction"); + return SQLITE_ERROR; + } + sqlite3BtreeClose(db->aDb[1].pBt); + db->aDb[1].pBt = 0; + sqlite3ResetInternalSchema(db, 0); + } + return SQLITE_OK; +} +#endif /* SQLITE_PAGER_PRAGMAS */ + +#ifndef SQLITE_OMIT_PAGER_PRAGMAS +/* +** If the TEMP database is open, close it and mark the database schema +** as needing reloading. This must be done when using the TEMP_STORE +** or DEFAULT_TEMP_STORE pragmas. +*/ +static int changeTempStorage(Parse *pParse, const char *zStorageType){ + int ts = getTempStore(zStorageType); + sqlite3 *db = pParse->db; + if( db->temp_store==ts ) return SQLITE_OK; + if( invalidateTempStorage( pParse ) != SQLITE_OK ){ + return SQLITE_ERROR; + } + db->temp_store = ts; + return SQLITE_OK; +} +#endif /* SQLITE_PAGER_PRAGMAS */ + +/* +** Generate code to return a single integer value. +*/ +static void returnSingleInt(Parse *pParse, const char *zLabel, int value){ + Vdbe *v = sqlite3GetVdbe(pParse); + sqlite3VdbeAddOp(v, OP_Integer, value, 0); + if( pParse->explain==0 ){ + sqlite3VdbeSetNumCols(v, 1); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, zLabel, P3_STATIC); + } + sqlite3VdbeAddOp(v, OP_Callback, 1, 0); +} + +#ifndef SQLITE_OMIT_FLAG_PRAGMAS +/* +** Check to see if zRight and zLeft refer to a pragma that queries +** or changes one of the flags in db->flags. Return 1 if so and 0 if not. +** Also, implement the pragma. 
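Editor's note: getSafetyLevel above packs its keyword table into a single string plus offset/length arrays; decoded, the mapping is on/yes/true to 1, no/off/false to 0, full to 2, leading digits pass through, and anything else (including an empty string) falls back to 1. A standalone restatement of that decision table, simplified for illustration (plain string table, exact lowercase keywords, whereas the original compares case-insensitively):

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified equivalent of the safety-level lookup: not SQLite's code,
** just the same mapping spelled out plainly. */
static int safety_level(const char *z){
  static const struct { const char *zWord; int value; } aMap[] = {
    { "on", 1 }, { "no", 0 }, { "off", 0 }, { "false", 0 },
    { "yes", 1 }, { "true", 1 }, { "full", 2 },
  };
  size_t i;
  if( isdigit((unsigned char)z[0]) ) return atoi(z);
  for(i=0; i<sizeof(aMap)/sizeof(aMap[0]); i++){
    if( strcmp(z, aMap[i].zWord)==0 ) return aMap[i].value;
  }
  return 1;  /* empty or unrecognized -> ON/NORMAL */
}

int main(void){
  printf("%d %d %d %d\n",
         safety_level("off"),    /* 0 */
         safety_level("full"),   /* 2 */
         safety_level("2"),      /* 2 */
         safety_level("bogus")); /* 1 */
  return 0;
}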
+*/ +static int flagPragma(Parse *pParse, const char *zLeft, const char *zRight){ + static const struct sPragmaType { + const char *zName; /* Name of the pragma */ + int mask; /* Mask for the db->flags value */ + } aPragma[] = { + { "full_column_names", SQLITE_FullColNames }, + { "short_column_names", SQLITE_ShortColNames }, + { "count_changes", SQLITE_CountRows }, + { "empty_result_callbacks", SQLITE_NullCallback }, + { "legacy_file_format", SQLITE_LegacyFileFmt }, + { "fullfsync", SQLITE_FullFSync }, +#ifdef SQLITE_DEBUG + { "sql_trace", SQLITE_SqlTrace }, + { "vdbe_listing", SQLITE_VdbeListing }, + { "vdbe_trace", SQLITE_VdbeTrace }, +#endif +#ifndef SQLITE_OMIT_CHECK + { "ignore_check_constraints", SQLITE_IgnoreChecks }, +#endif + /* The following is VERY experimental */ + { "writable_schema", SQLITE_WriteSchema|SQLITE_RecoveryMode }, + { "omit_readlock", SQLITE_NoReadlock }, + + /* TODO: Maybe it shouldn't be possible to change the ReadUncommitted + ** flag if there are any active statements. */ + { "read_uncommitted", SQLITE_ReadUncommitted }, + }; + int i; + const struct sPragmaType *p; + for(i=0, p=aPragma; izName)==0 ){ + sqlite3 *db = pParse->db; + Vdbe *v; + v = sqlite3GetVdbe(pParse); + if( v ){ + if( zRight==0 ){ + returnSingleInt(pParse, p->zName, (db->flags & p->mask)!=0 ); + }else{ + if( getBoolean(zRight) ){ + db->flags |= p->mask; + }else{ + db->flags &= ~p->mask; + } + } + } + return 1; + } + } + return 0; +} +#endif /* SQLITE_OMIT_FLAG_PRAGMAS */ + +/* +** Process a pragma statement. +** +** Pragmas are of this form: +** +** PRAGMA [database.]id [= value] +** +** The identifier might also be a string. The value is a string, and +** identifier, or a number. If minusFlag is true, then the value is +** a number that was preceded by a minus sign. +** +** If the left side is "database.id" then pId1 is the database name +** and pId2 is the id. If the left side is just "id" then pId1 is the +** id and pId2 is any empty string. +*/ +void sqlite3Pragma( + Parse *pParse, + Token *pId1, /* First part of [database.]id field */ + Token *pId2, /* Second part of [database.]id field, or NULL */ + Token *pValue, /* Token for , or NULL */ + int minusFlag /* True if a '-' sign preceded */ +){ + char *zLeft = 0; /* Nul-terminated UTF-8 string */ + char *zRight = 0; /* Nul-terminated UTF-8 string , or NULL */ + const char *zDb = 0; /* The database name */ + Token *pId; /* Pointer to token */ + int iDb; /* Database index for */ + sqlite3 *db = pParse->db; + Db *pDb; + Vdbe *v = sqlite3GetVdbe(pParse); + if( v==0 ) return; + + /* Interpret the [database.] part of the pragma statement. iDb is the + ** index of the database this pragma is being applied to in db.aDb[]. */ + iDb = sqlite3TwoPartName(pParse, pId1, pId2, &pId); + if( iDb<0 ) return; + pDb = &db->aDb[iDb]; + + /* If the temp database has been explicitly named as part of the + ** pragma, make sure it is open. + */ + if( iDb==1 && sqlite3OpenTempDatabase(pParse) ){ + return; + } + + zLeft = sqlite3NameFromToken(db, pId); + if( !zLeft ) return; + if( minusFlag ){ + zRight = sqlite3MPrintf(db, "-%T", pValue); + }else{ + zRight = sqlite3NameFromToken(db, pValue); + } + + zDb = ((iDb>0)?pDb->zName:0); + if( sqlite3AuthCheck(pParse, SQLITE_PRAGMA, zLeft, zRight, zDb) ){ + goto pragma_out; + } + +#ifndef SQLITE_OMIT_PAGER_PRAGMAS + /* + ** PRAGMA [database.]default_cache_size + ** PRAGMA [database.]default_cache_size=N + ** + ** The first form reports the current persistent setting for the + ** page cache size. 
The value returned is the maximum number of + ** pages in the page cache. The second form sets both the current + ** page cache size value and the persistent page cache size value + ** stored in the database file. + ** + ** The default cache size is stored in meta-value 2 of page 1 of the + ** database file. The cache size is actually the absolute value of + ** this memory location. The sign of meta-value 2 determines the + ** synchronous setting. A negative value means synchronous is off + ** and a positive value means synchronous is on. + */ + if( sqlite3StrICmp(zLeft,"default_cache_size")==0 ){ + static const VdbeOpList getCacheSize[] = { + { OP_ReadCookie, 0, 2, 0}, /* 0 */ + { OP_AbsValue, 0, 0, 0}, + { OP_Dup, 0, 0, 0}, + { OP_Integer, 0, 0, 0}, + { OP_Ne, 0, 6, 0}, + { OP_Integer, 0, 0, 0}, /* 5 */ + { OP_Callback, 1, 0, 0}, + }; + int addr; + if( sqlite3ReadSchema(pParse) ) goto pragma_out; + sqlite3VdbeUsesBtree(v, iDb); + if( !zRight ){ + sqlite3VdbeSetNumCols(v, 1); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "cache_size", P3_STATIC); + addr = sqlite3VdbeAddOpList(v, ArraySize(getCacheSize), getCacheSize); + sqlite3VdbeChangeP1(v, addr, iDb); + sqlite3VdbeChangeP1(v, addr+5, SQLITE_DEFAULT_CACHE_SIZE); + }else{ + int size = atoi(zRight); + if( size<0 ) size = -size; + sqlite3BeginWriteOperation(pParse, 0, iDb); + sqlite3VdbeAddOp(v, OP_Integer, size, 0); + sqlite3VdbeAddOp(v, OP_ReadCookie, iDb, 2); + addr = sqlite3VdbeAddOp(v, OP_Integer, 0, 0); + sqlite3VdbeAddOp(v, OP_Ge, 0, addr+3); + sqlite3VdbeAddOp(v, OP_Negative, 0, 0); + sqlite3VdbeAddOp(v, OP_SetCookie, iDb, 2); + pDb->pSchema->cache_size = size; + sqlite3BtreeSetCacheSize(pDb->pBt, pDb->pSchema->cache_size); + } + }else + + /* + ** PRAGMA [database.]page_size + ** PRAGMA [database.]page_size=N + ** + ** The first form reports the current setting for the + ** database page size in bytes. The second form sets the + ** database page size value. The value can only be set if + ** the database has not yet been created. + */ + if( sqlite3StrICmp(zLeft,"page_size")==0 ){ + Btree *pBt = pDb->pBt; + if( !zRight ){ + int size = pBt ? sqlite3BtreeGetPageSize(pBt) : 0; + returnSingleInt(pParse, "page_size", size); + }else{ + /* Malloc may fail when setting the page-size, as there is an internal + ** buffer that the pager module resizes using sqlite3_realloc(). + */ + if( SQLITE_NOMEM==sqlite3BtreeSetPageSize(pBt, atoi(zRight), -1) ){ + db->mallocFailed = 1; + } + } + }else + + /* + ** PRAGMA [database.]max_page_count + ** PRAGMA [database.]max_page_count=N + ** + ** The first form reports the current setting for the + ** maximum number of pages in the database file. The + ** second form attempts to change this setting. Both + ** forms return the current setting. + */ + if( sqlite3StrICmp(zLeft,"max_page_count")==0 ){ + Btree *pBt = pDb->pBt; + int newMax = 0; + if( zRight ){ + newMax = atoi(zRight); + } + if( pBt ){ + newMax = sqlite3BtreeMaxPageCount(pBt, newMax); + } + returnSingleInt(pParse, "max_page_count", newMax); + }else + + /* + ** PRAGMA [database.]locking_mode + ** PRAGMA [database.]locking_mode = (normal|exclusive) + */ + if( sqlite3StrICmp(zLeft,"locking_mode")==0 ){ + const char *zRet = "normal"; + int eMode = getLockingMode(zRight); + + if( pId2->n==0 && eMode==PAGER_LOCKINGMODE_QUERY ){ + /* Simple "PRAGMA locking_mode;" statement. This is a query for + ** the current default locking mode (which may be different to + ** the locking-mode of the main database). 
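Editor's note: default_cache_size above writes the persistent value into meta-value 2 of the database file, while the plain cache_size pragma later in this file only adjusts the in-memory setting for the current connection. A quick sketch (example values only; in-memory database) of reading and adjusting the non-persistent knobs from application code:

#include <stdio.h>
#include <sqlite3.h>

static int print_cb(void *arg, int nCol, char **azVal, char **azCol){
  (void)nCol; (void)azCol;
  printf("%s = %s\n", (const char*)arg, azVal[0] ? azVal[0] : "NULL");
  return 0;
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;

  /* Report the current page size and the local page-cache size. */
  sqlite3_exec(db, "PRAGMA page_size;",  print_cb, (void*)"page_size", 0);
  sqlite3_exec(db, "PRAGMA cache_size;", print_cb, (void*)"cache_size", 0);

  /* cache_size only changes the in-memory setting; it is not written back
  ** to the database file, so it reverts the next time the file is opened. */
  sqlite3_exec(db, "PRAGMA cache_size=500;", 0, 0, 0);
  sqlite3_exec(db, "PRAGMA cache_size;", print_cb, (void*)"cache_size", 0);

  sqlite3_close(db);
  return 0;
}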
+ */ + eMode = db->dfltLockMode; + }else{ + Pager *pPager; + if( pId2->n==0 ){ + /* This indicates that no database name was specified as part + ** of the PRAGMA command. In this case the locking-mode must be + ** set on all attached databases, as well as the main db file. + ** + ** Also, the sqlite3.dfltLockMode variable is set so that + ** any subsequently attached databases also use the specified + ** locking mode. + */ + int ii; + assert(pDb==&db->aDb[0]); + for(ii=2; iinDb; ii++){ + pPager = sqlite3BtreePager(db->aDb[ii].pBt); + sqlite3PagerLockingMode(pPager, eMode); + } + db->dfltLockMode = eMode; + } + pPager = sqlite3BtreePager(pDb->pBt); + eMode = sqlite3PagerLockingMode(pPager, eMode); + } + + assert(eMode==PAGER_LOCKINGMODE_NORMAL||eMode==PAGER_LOCKINGMODE_EXCLUSIVE); + if( eMode==PAGER_LOCKINGMODE_EXCLUSIVE ){ + zRet = "exclusive"; + } + sqlite3VdbeSetNumCols(v, 1); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "locking_mode", P3_STATIC); + sqlite3VdbeOp3(v, OP_String8, 0, 0, zRet, 0); + sqlite3VdbeAddOp(v, OP_Callback, 1, 0); + }else +#endif /* SQLITE_OMIT_PAGER_PRAGMAS */ + + /* + ** PRAGMA [database.]auto_vacuum + ** PRAGMA [database.]auto_vacuum=N + ** + ** Get or set the (boolean) value of the database 'auto-vacuum' parameter. + */ +#ifndef SQLITE_OMIT_AUTOVACUUM + if( sqlite3StrICmp(zLeft,"auto_vacuum")==0 ){ + Btree *pBt = pDb->pBt; + if( sqlite3ReadSchema(pParse) ){ + goto pragma_out; + } + if( !zRight ){ + int auto_vacuum = + pBt ? sqlite3BtreeGetAutoVacuum(pBt) : SQLITE_DEFAULT_AUTOVACUUM; + returnSingleInt(pParse, "auto_vacuum", auto_vacuum); + }else{ + int eAuto = getAutoVacuum(zRight); + if( eAuto>=0 ){ + /* Call SetAutoVacuum() to set initialize the internal auto and + ** incr-vacuum flags. This is required in case this connection + ** creates the database file. It is important that it is created + ** as an auto-vacuum capable db. + */ + int rc = sqlite3BtreeSetAutoVacuum(pBt, eAuto); + if( rc==SQLITE_OK && (eAuto==1 || eAuto==2) ){ + /* When setting the auto_vacuum mode to either "full" or + ** "incremental", write the value of meta[6] in the database + ** file. Before writing to meta[6], check that meta[3] indicates + ** that this really is an auto-vacuum capable database. + */ + static const VdbeOpList setMeta6[] = { + { OP_Transaction, 0, 1, 0}, /* 0 */ + { OP_ReadCookie, 0, 3, 0}, /* 1 */ + { OP_If, 0, 0, 0}, /* 2 */ + { OP_Halt, SQLITE_OK, OE_Abort, 0}, /* 3 */ + { OP_Integer, 0, 0, 0}, /* 4 */ + { OP_SetCookie, 0, 6, 0}, /* 5 */ + }; + int iAddr; + iAddr = sqlite3VdbeAddOpList(v, ArraySize(setMeta6), setMeta6); + sqlite3VdbeChangeP1(v, iAddr, iDb); + sqlite3VdbeChangeP1(v, iAddr+1, iDb); + sqlite3VdbeChangeP2(v, iAddr+2, iAddr+4); + sqlite3VdbeChangeP1(v, iAddr+4, eAuto-1); + sqlite3VdbeChangeP1(v, iAddr+5, iDb); + sqlite3VdbeUsesBtree(v, iDb); + } + } + } + }else +#endif + + /* + ** PRAGMA [database.]incremental_vacuum(N) + ** + ** Do N steps of incremental vacuuming on a database. 
+ */ +#ifndef SQLITE_OMIT_AUTOVACUUM + if( sqlite3StrICmp(zLeft,"incremental_vacuum")==0 ){ + int iLimit, addr; + if( sqlite3ReadSchema(pParse) ){ + goto pragma_out; + } + if( zRight==0 || !sqlite3GetInt32(zRight, &iLimit) || iLimit<=0 ){ + iLimit = 0x7fffffff; + } + sqlite3BeginWriteOperation(pParse, 0, iDb); + sqlite3VdbeAddOp(v, OP_MemInt, iLimit, 0); + addr = sqlite3VdbeAddOp(v, OP_IncrVacuum, iDb, 0); + sqlite3VdbeAddOp(v, OP_Callback, 0, 0); + sqlite3VdbeAddOp(v, OP_MemIncr, -1, 0); + sqlite3VdbeAddOp(v, OP_IfMemPos, 0, addr); + sqlite3VdbeJumpHere(v, addr); + }else +#endif + +#ifndef SQLITE_OMIT_PAGER_PRAGMAS + /* + ** PRAGMA [database.]cache_size + ** PRAGMA [database.]cache_size=N + ** + ** The first form reports the current local setting for the + ** page cache size. The local setting can be different from + ** the persistent cache size value that is stored in the database + ** file itself. The value returned is the maximum number of + ** pages in the page cache. The second form sets the local + ** page cache size value. It does not change the persistent + ** cache size stored on the disk so the cache size will revert + ** to its default value when the database is closed and reopened. + ** N should be a positive integer. + */ + if( sqlite3StrICmp(zLeft,"cache_size")==0 ){ + if( sqlite3ReadSchema(pParse) ) goto pragma_out; + if( !zRight ){ + returnSingleInt(pParse, "cache_size", pDb->pSchema->cache_size); + }else{ + int size = atoi(zRight); + if( size<0 ) size = -size; + pDb->pSchema->cache_size = size; + sqlite3BtreeSetCacheSize(pDb->pBt, pDb->pSchema->cache_size); + } + }else + + /* + ** PRAGMA temp_store + ** PRAGMA temp_store = "default"|"memory"|"file" + ** + ** Return or set the local value of the temp_store flag. Changing + ** the local value does not make changes to the disk file and the default + ** value will be restored the next time the database is opened. + ** + ** Note that it is possible for the library compile-time options to + ** override this setting + */ + if( sqlite3StrICmp(zLeft, "temp_store")==0 ){ + if( !zRight ){ + returnSingleInt(pParse, "temp_store", db->temp_store); + }else{ + changeTempStorage(pParse, zRight); + } + }else + + /* + ** PRAGMA temp_store_directory + ** PRAGMA temp_store_directory = ""|"directory_name" + ** + ** Return or set the local value of the temp_store_directory flag. Changing + ** the value sets a specific directory to be used for temporary files. + ** Setting to a null string reverts to the default temporary directory search. + ** If temporary directory is changed, then invalidateTempStorage. 
+ ** + */ + if( sqlite3StrICmp(zLeft, "temp_store_directory")==0 ){ + if( !zRight ){ + if( sqlite3_temp_directory ){ + sqlite3VdbeSetNumCols(v, 1); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, + "temp_store_directory", P3_STATIC); + sqlite3VdbeOp3(v, OP_String8, 0, 0, sqlite3_temp_directory, 0); + sqlite3VdbeAddOp(v, OP_Callback, 1, 0); + } + }else{ + if( zRight[0] + && !sqlite3OsAccess(db->pVfs, zRight, SQLITE_ACCESS_READWRITE) + ){ + sqlite3ErrorMsg(pParse, "not a writable directory"); + goto pragma_out; + } + if( TEMP_STORE==0 + || (TEMP_STORE==1 && db->temp_store<=1) + || (TEMP_STORE==2 && db->temp_store==1) + ){ + invalidateTempStorage(pParse); + } + sqlite3_free(sqlite3_temp_directory); + if( zRight[0] ){ + sqlite3_temp_directory = zRight; + zRight = 0; + }else{ + sqlite3_temp_directory = 0; + } + } + }else + + /* + ** PRAGMA [database.]synchronous + ** PRAGMA [database.]synchronous=OFF|ON|NORMAL|FULL + ** + ** Return or set the local value of the synchronous flag. Changing + ** the local value does not make changes to the disk file and the + ** default value will be restored the next time the database is + ** opened. + */ + if( sqlite3StrICmp(zLeft,"synchronous")==0 ){ + if( sqlite3ReadSchema(pParse) ) goto pragma_out; + if( !zRight ){ + returnSingleInt(pParse, "synchronous", pDb->safety_level-1); + }else{ + if( !db->autoCommit ){ + sqlite3ErrorMsg(pParse, + "Safety level may not be changed inside a transaction"); + }else{ + pDb->safety_level = getSafetyLevel(zRight)+1; + } + } + }else +#endif /* SQLITE_OMIT_PAGER_PRAGMAS */ + +#ifndef SQLITE_OMIT_FLAG_PRAGMAS + if( flagPragma(pParse, zLeft, zRight) ){ + /* The flagPragma() subroutine also generates any necessary code + ** there is nothing more to do here */ + }else +#endif /* SQLITE_OMIT_FLAG_PRAGMAS */ + +#ifndef SQLITE_OMIT_SCHEMA_PRAGMAS + /* + ** PRAGMA table_info(
) + ** + ** Return a single row for each column of the named table. The columns of + ** the returned data set are: + ** + ** cid: Column id (numbered from left to right, starting at 0) + ** name: Column name + ** type: Column declaration type. + ** notnull: True if 'NOT NULL' is part of column declaration + ** dflt_value: The default value for the column, if any. + */ + if( sqlite3StrICmp(zLeft, "table_info")==0 && zRight ){ + Table *pTab; + if( sqlite3ReadSchema(pParse) ) goto pragma_out; + pTab = sqlite3FindTable(db, zRight, zDb); + if( pTab ){ + int i; + int nHidden = 0; + Column *pCol; + sqlite3VdbeSetNumCols(v, 6); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "cid", P3_STATIC); + sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", P3_STATIC); + sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "type", P3_STATIC); + sqlite3VdbeSetColName(v, 3, COLNAME_NAME, "notnull", P3_STATIC); + sqlite3VdbeSetColName(v, 4, COLNAME_NAME, "dflt_value", P3_STATIC); + sqlite3VdbeSetColName(v, 5, COLNAME_NAME, "pk", P3_STATIC); + sqlite3ViewGetColumnNames(pParse, pTab); + for(i=0, pCol=pTab->aCol; inCol; i++, pCol++){ + const Token *pDflt; + if( IsHiddenColumn(pCol) ){ + nHidden++; + continue; + } + sqlite3VdbeAddOp(v, OP_Integer, i-nHidden, 0); + sqlite3VdbeOp3(v, OP_String8, 0, 0, pCol->zName, 0); + sqlite3VdbeOp3(v, OP_String8, 0, 0, + pCol->zType ? pCol->zType : "", 0); + sqlite3VdbeAddOp(v, OP_Integer, pCol->notNull, 0); + if( pCol->pDflt && (pDflt = &pCol->pDflt->span)->z ){ + sqlite3VdbeOp3(v, OP_String8, 0, 0, (char*)pDflt->z, pDflt->n); + }else{ + sqlite3VdbeAddOp(v, OP_Null, 0, 0); + } + sqlite3VdbeAddOp(v, OP_Integer, pCol->isPrimKey, 0); + sqlite3VdbeAddOp(v, OP_Callback, 6, 0); + } + } + }else + + if( sqlite3StrICmp(zLeft, "index_info")==0 && zRight ){ + Index *pIdx; + Table *pTab; + if( sqlite3ReadSchema(pParse) ) goto pragma_out; + pIdx = sqlite3FindIndex(db, zRight, zDb); + if( pIdx ){ + int i; + pTab = pIdx->pTable; + sqlite3VdbeSetNumCols(v, 3); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seqno", P3_STATIC); + sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "cid", P3_STATIC); + sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "name", P3_STATIC); + for(i=0; inColumn; i++){ + int cnum = pIdx->aiColumn[i]; + sqlite3VdbeAddOp(v, OP_Integer, i, 0); + sqlite3VdbeAddOp(v, OP_Integer, cnum, 0); + assert( pTab->nCol>cnum ); + sqlite3VdbeOp3(v, OP_String8, 0, 0, pTab->aCol[cnum].zName, 0); + sqlite3VdbeAddOp(v, OP_Callback, 3, 0); + } + } + }else + + if( sqlite3StrICmp(zLeft, "index_list")==0 && zRight ){ + Index *pIdx; + Table *pTab; + if( sqlite3ReadSchema(pParse) ) goto pragma_out; + pTab = sqlite3FindTable(db, zRight, zDb); + if( pTab ){ + v = sqlite3GetVdbe(pParse); + pIdx = pTab->pIndex; + if( pIdx ){ + int i = 0; + sqlite3VdbeSetNumCols(v, 3); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seq", P3_STATIC); + sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", P3_STATIC); + sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "unique", P3_STATIC); + while(pIdx){ + sqlite3VdbeAddOp(v, OP_Integer, i, 0); + sqlite3VdbeOp3(v, OP_String8, 0, 0, pIdx->zName, 0); + sqlite3VdbeAddOp(v, OP_Integer, pIdx->onError!=OE_None, 0); + sqlite3VdbeAddOp(v, OP_Callback, 3, 0); + ++i; + pIdx = pIdx->pNext; + } + } + } + }else + + if( sqlite3StrICmp(zLeft, "database_list")==0 ){ + int i; + if( sqlite3ReadSchema(pParse) ) goto pragma_out; + sqlite3VdbeSetNumCols(v, 3); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seq", P3_STATIC); + sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", P3_STATIC); + sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "file", 
P3_STATIC); + for(i=0; inDb; i++){ + if( db->aDb[i].pBt==0 ) continue; + assert( db->aDb[i].zName!=0 ); + sqlite3VdbeAddOp(v, OP_Integer, i, 0); + sqlite3VdbeOp3(v, OP_String8, 0, 0, db->aDb[i].zName, 0); + sqlite3VdbeOp3(v, OP_String8, 0, 0, + sqlite3BtreeGetFilename(db->aDb[i].pBt), 0); + sqlite3VdbeAddOp(v, OP_Callback, 3, 0); + } + }else + + if( sqlite3StrICmp(zLeft, "collation_list")==0 ){ + int i = 0; + HashElem *p; + sqlite3VdbeSetNumCols(v, 2); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seq", P3_STATIC); + sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", P3_STATIC); + for(p=sqliteHashFirst(&db->aCollSeq); p; p=sqliteHashNext(p)){ + CollSeq *pColl = (CollSeq *)sqliteHashData(p); + sqlite3VdbeAddOp(v, OP_Integer, i++, 0); + sqlite3VdbeOp3(v, OP_String8, 0, 0, pColl->zName, 0); + sqlite3VdbeAddOp(v, OP_Callback, 2, 0); + } + }else +#endif /* SQLITE_OMIT_SCHEMA_PRAGMAS */ + +#ifndef SQLITE_OMIT_FOREIGN_KEY + if( sqlite3StrICmp(zLeft, "foreign_key_list")==0 && zRight ){ + FKey *pFK; + Table *pTab; + if( sqlite3ReadSchema(pParse) ) goto pragma_out; + pTab = sqlite3FindTable(db, zRight, zDb); + if( pTab ){ + v = sqlite3GetVdbe(pParse); + pFK = pTab->pFKey; + if( pFK ){ + int i = 0; + sqlite3VdbeSetNumCols(v, 5); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "id", P3_STATIC); + sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "seq", P3_STATIC); + sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "table", P3_STATIC); + sqlite3VdbeSetColName(v, 3, COLNAME_NAME, "from", P3_STATIC); + sqlite3VdbeSetColName(v, 4, COLNAME_NAME, "to", P3_STATIC); + while(pFK){ + int j; + for(j=0; jnCol; j++){ + char *zCol = pFK->aCol[j].zCol; + sqlite3VdbeAddOp(v, OP_Integer, i, 0); + sqlite3VdbeAddOp(v, OP_Integer, j, 0); + sqlite3VdbeOp3(v, OP_String8, 0, 0, pFK->zTo, 0); + sqlite3VdbeOp3(v, OP_String8, 0, 0, + pTab->aCol[pFK->aCol[j].iFrom].zName, 0); + sqlite3VdbeOp3(v, zCol ? OP_String8 : OP_Null, 0, 0, zCol, 0); + sqlite3VdbeAddOp(v, OP_Callback, 5, 0); + } + ++i; + pFK = pFK->pNextFrom; + } + } + } + }else +#endif /* !defined(SQLITE_OMIT_FOREIGN_KEY) */ + +#ifndef NDEBUG + if( sqlite3StrICmp(zLeft, "parser_trace")==0 ){ + if( zRight ){ + if( getBoolean(zRight) ){ + sqlite3ParserTrace(stderr, "parser: "); + }else{ + sqlite3ParserTrace(0, 0); + } + } + }else +#endif + + /* Reinstall the LIKE and GLOB functions. The variant of LIKE + ** used will be case sensitive or not depending on the RHS. + */ + if( sqlite3StrICmp(zLeft, "case_sensitive_like")==0 ){ + if( zRight ){ + sqlite3RegisterLikeFunctions(db, getBoolean(zRight)); + } + }else + +#ifndef SQLITE_INTEGRITY_CHECK_ERROR_MAX +# define SQLITE_INTEGRITY_CHECK_ERROR_MAX 100 +#endif + +#ifndef SQLITE_OMIT_INTEGRITY_CHECK + if( sqlite3StrICmp(zLeft, "integrity_check")==0 ){ + int i, j, addr, mxErr; + + /* Code that appears at the end of the integrity check. If no error + ** messages have been generated, output OK. 
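Editor's note: the schema-introspection pragmas above (table_info, index_info, index_list, database_list, foreign_key_list) all emit ordinary result rows through OP_Callback, so they can be read with a prepared statement like any SELECT. A sketch that walks PRAGMA table_info for an example table (the table and column names are invented):

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;

  sqlite3_exec(db,
    "CREATE TABLE person(id INTEGER PRIMARY KEY, name TEXT NOT NULL, age INTEGER DEFAULT 0);",
    0, 0, 0);

  /* Columns produced: cid, name, type, notnull, dflt_value, pk. */
  if( sqlite3_prepare_v2(db, "PRAGMA table_info(person);", -1, &pStmt, 0)==SQLITE_OK ){
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      printf("cid=%d name=%s type=%s notnull=%d pk=%d\n",
             sqlite3_column_int(pStmt, 0),
             (const char*)sqlite3_column_text(pStmt, 1),
             (const char*)sqlite3_column_text(pStmt, 2),
             sqlite3_column_int(pStmt, 3),
             sqlite3_column_int(pStmt, 5));
    }
    sqlite3_finalize(pStmt);
  }
  sqlite3_close(db);
  return 0;
}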
Otherwise output the + ** error message + */ + static const VdbeOpList endCode[] = { + { OP_MemLoad, 0, 0, 0}, + { OP_Integer, 0, 0, 0}, + { OP_Ne, 0, 0, 0}, /* 2 */ + { OP_String8, 0, 0, "ok"}, + { OP_Callback, 1, 0, 0}, + }; + + /* Initialize the VDBE program */ + if( sqlite3ReadSchema(pParse) ) goto pragma_out; + sqlite3VdbeSetNumCols(v, 1); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "integrity_check", P3_STATIC); + + /* Set the maximum error count */ + mxErr = SQLITE_INTEGRITY_CHECK_ERROR_MAX; + if( zRight ){ + mxErr = atoi(zRight); + if( mxErr<=0 ){ + mxErr = SQLITE_INTEGRITY_CHECK_ERROR_MAX; + } + } + sqlite3VdbeAddOp(v, OP_MemInt, mxErr, 0); + + /* Do an integrity check on each database file */ + for(i=0; inDb; i++){ + HashElem *x; + Hash *pTbls; + int cnt = 0; + + if( OMIT_TEMPDB && i==1 ) continue; + + sqlite3CodeVerifySchema(pParse, i); + addr = sqlite3VdbeAddOp(v, OP_IfMemPos, 0, 0); + sqlite3VdbeAddOp(v, OP_Halt, 0, 0); + sqlite3VdbeJumpHere(v, addr); + + /* Do an integrity check of the B-Tree + */ + pTbls = &db->aDb[i].pSchema->tblHash; + for(x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){ + Table *pTab = sqliteHashData(x); + Index *pIdx; + sqlite3VdbeAddOp(v, OP_Integer, pTab->tnum, 0); + cnt++; + for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + sqlite3VdbeAddOp(v, OP_Integer, pIdx->tnum, 0); + cnt++; + } + } + if( cnt==0 ) continue; + sqlite3VdbeAddOp(v, OP_IntegrityCk, 0, i); + addr = sqlite3VdbeAddOp(v, OP_IsNull, -1, 0); + sqlite3VdbeOp3(v, OP_String8, 0, 0, + sqlite3MPrintf(db, "*** in database %s ***\n", db->aDb[i].zName), + P3_DYNAMIC); + sqlite3VdbeAddOp(v, OP_Pull, 1, 0); + sqlite3VdbeAddOp(v, OP_Concat, 0, 0); + sqlite3VdbeAddOp(v, OP_Callback, 1, 0); + sqlite3VdbeJumpHere(v, addr); + + /* Make sure all the indices are constructed correctly. 
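Editor's note: PRAGMA integrity_check above reports at most N problem rows (N defaults to SQLITE_INTEGRITY_CHECK_ERROR_MAX, 100) and a single row containing "ok" when nothing is wrong, so callers usually just compare the first row against "ok". A minimal check routine built on the public API (illustrative only):

#include <stdio.h>
#include <string.h>
#include <sqlite3.h>

/* Returns 1 if the database passes "PRAGMA integrity_check", else 0. */
static int db_is_ok(sqlite3 *db){
  sqlite3_stmt *pStmt;
  int ok = 0;
  if( sqlite3_prepare_v2(db, "PRAGMA integrity_check;", -1, &pStmt, 0)!=SQLITE_OK ){
    return 0;
  }
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    const unsigned char *zMsg = sqlite3_column_text(pStmt, 0);
    ok = zMsg!=0 && strcmp((const char*)zMsg, "ok")==0;
  }
  sqlite3_finalize(pStmt);
  return ok;
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "CREATE TABLE t(x); CREATE INDEX i ON t(x);", 0, 0, 0);
  printf("integrity: %s\n", db_is_ok(db) ? "ok" : "problems found");
  sqlite3_close(db);
  return 0;
}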
+ */ + for(x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){ + Table *pTab = sqliteHashData(x); + Index *pIdx; + int loopTop; + + if( pTab->pIndex==0 ) continue; + addr = sqlite3VdbeAddOp(v, OP_IfMemPos, 0, 0); + sqlite3VdbeAddOp(v, OP_Halt, 0, 0); + sqlite3VdbeJumpHere(v, addr); + sqlite3OpenTableAndIndices(pParse, pTab, 1, OP_OpenRead); + sqlite3VdbeAddOp(v, OP_MemInt, 0, 1); + loopTop = sqlite3VdbeAddOp(v, OP_Rewind, 1, 0); + sqlite3VdbeAddOp(v, OP_MemIncr, 1, 1); + for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){ + int jmp2; + static const VdbeOpList idxErr[] = { + { OP_MemIncr, -1, 0, 0}, + { OP_String8, 0, 0, "rowid "}, + { OP_Rowid, 1, 0, 0}, + { OP_String8, 0, 0, " missing from index "}, + { OP_String8, 0, 0, 0}, /* 4 */ + { OP_Concat, 2, 0, 0}, + { OP_Callback, 1, 0, 0}, + }; + sqlite3GenerateIndexKey(v, pIdx, 1); + jmp2 = sqlite3VdbeAddOp(v, OP_Found, j+2, 0); + addr = sqlite3VdbeAddOpList(v, ArraySize(idxErr), idxErr); + sqlite3VdbeChangeP3(v, addr+4, pIdx->zName, P3_STATIC); + sqlite3VdbeJumpHere(v, jmp2); + } + sqlite3VdbeAddOp(v, OP_Next, 1, loopTop+1); + sqlite3VdbeJumpHere(v, loopTop); + for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){ + static const VdbeOpList cntIdx[] = { + { OP_MemInt, 0, 2, 0}, + { OP_Rewind, 0, 0, 0}, /* 1 */ + { OP_MemIncr, 1, 2, 0}, + { OP_Next, 0, 0, 0}, /* 3 */ + { OP_MemLoad, 1, 0, 0}, + { OP_MemLoad, 2, 0, 0}, + { OP_Eq, 0, 0, 0}, /* 6 */ + { OP_MemIncr, -1, 0, 0}, + { OP_String8, 0, 0, "wrong # of entries in index "}, + { OP_String8, 0, 0, 0}, /* 9 */ + { OP_Concat, 0, 0, 0}, + { OP_Callback, 1, 0, 0}, + }; + if( pIdx->tnum==0 ) continue; + addr = sqlite3VdbeAddOp(v, OP_IfMemPos, 0, 0); + sqlite3VdbeAddOp(v, OP_Halt, 0, 0); + sqlite3VdbeJumpHere(v, addr); + addr = sqlite3VdbeAddOpList(v, ArraySize(cntIdx), cntIdx); + sqlite3VdbeChangeP1(v, addr+1, j+2); + sqlite3VdbeChangeP2(v, addr+1, addr+4); + sqlite3VdbeChangeP1(v, addr+3, j+2); + sqlite3VdbeChangeP2(v, addr+3, addr+2); + sqlite3VdbeJumpHere(v, addr+6); + sqlite3VdbeChangeP3(v, addr+9, pIdx->zName, P3_STATIC); + } + } + } + addr = sqlite3VdbeAddOpList(v, ArraySize(endCode), endCode); + sqlite3VdbeChangeP1(v, addr+1, mxErr); + sqlite3VdbeJumpHere(v, addr+2); + }else +#endif /* SQLITE_OMIT_INTEGRITY_CHECK */ + +#ifndef SQLITE_OMIT_UTF16 + /* + ** PRAGMA encoding + ** PRAGMA encoding = "utf-8"|"utf-16"|"utf-16le"|"utf-16be" + ** + ** In it's first form, this pragma returns the encoding of the main + ** database. If the database is not initialized, it is initialized now. + ** + ** The second form of this pragma is a no-op if the main database file + ** has not already been initialized. In this case it sets the default + ** encoding that will be used for the main database file if a new file + ** is created. If an existing main database file is opened, then the + ** default text encoding for the existing database is used. + ** + ** In all cases new databases created using the ATTACH command are + ** created to use the same default text encoding as the main database. If + ** the main database has not been initialized and/or created when ATTACH + ** is executed, this is done before the ATTACH operation. + ** + ** In the second form this pragma sets the text encoding to be used in + ** new database files created using this database handle. 
It is only + ** useful if invoked immediately after the main database i + */ + if( sqlite3StrICmp(zLeft, "encoding")==0 ){ + static const struct EncName { + char *zName; + u8 enc; + } encnames[] = { + { "UTF-8", SQLITE_UTF8 }, + { "UTF8", SQLITE_UTF8 }, + { "UTF-16le", SQLITE_UTF16LE }, + { "UTF16le", SQLITE_UTF16LE }, + { "UTF-16be", SQLITE_UTF16BE }, + { "UTF16be", SQLITE_UTF16BE }, + { "UTF-16", 0 }, /* SQLITE_UTF16NATIVE */ + { "UTF16", 0 }, /* SQLITE_UTF16NATIVE */ + { 0, 0 } + }; + const struct EncName *pEnc; + if( !zRight ){ /* "PRAGMA encoding" */ + if( sqlite3ReadSchema(pParse) ) goto pragma_out; + sqlite3VdbeSetNumCols(v, 1); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "encoding", P3_STATIC); + sqlite3VdbeAddOp(v, OP_String8, 0, 0); + for(pEnc=&encnames[0]; pEnc->zName; pEnc++){ + if( pEnc->enc==ENC(pParse->db) ){ + sqlite3VdbeChangeP3(v, -1, pEnc->zName, P3_STATIC); + break; + } + } + sqlite3VdbeAddOp(v, OP_Callback, 1, 0); + }else{ /* "PRAGMA encoding = XXX" */ + /* Only change the value of sqlite.enc if the database handle is not + ** initialized. If the main database exists, the new sqlite.enc value + ** will be overwritten when the schema is next loaded. If it does not + ** already exists, it will be created to use the new encoding value. + */ + if( + !(DbHasProperty(db, 0, DB_SchemaLoaded)) || + DbHasProperty(db, 0, DB_Empty) + ){ + for(pEnc=&encnames[0]; pEnc->zName; pEnc++){ + if( 0==sqlite3StrICmp(zRight, pEnc->zName) ){ + ENC(pParse->db) = pEnc->enc ? pEnc->enc : SQLITE_UTF16NATIVE; + break; + } + } + if( !pEnc->zName ){ + sqlite3ErrorMsg(pParse, "unsupported encoding: %s", zRight); + } + } + } + }else +#endif /* SQLITE_OMIT_UTF16 */ + +#ifndef SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS + /* + ** PRAGMA [database.]schema_version + ** PRAGMA [database.]schema_version = + ** + ** PRAGMA [database.]user_version + ** PRAGMA [database.]user_version = + ** + ** The pragma's schema_version and user_version are used to set or get + ** the value of the schema-version and user-version, respectively. Both + ** the schema-version and the user-version are 32-bit signed integers + ** stored in the database header. + ** + ** The schema-cookie is usually only manipulated internally by SQLite. It + ** is incremented by SQLite whenever the database schema is modified (by + ** creating or dropping a table or index). The schema version is used by + ** SQLite each time a query is executed to ensure that the internal cache + ** of the schema used when compiling the SQL query matches the schema of + ** the database against which the compiled query is actually executed. + ** Subverting this mechanism by using "PRAGMA schema_version" to modify + ** the schema-version is potentially dangerous and may lead to program + ** crashes or database corruption. Use with caution! + ** + ** The user-version is not used internally by SQLite. It may be used by + ** applications for any purpose. + */ + if( sqlite3StrICmp(zLeft, "schema_version")==0 + || sqlite3StrICmp(zLeft, "user_version")==0 + || sqlite3StrICmp(zLeft, "freelist_count")==0 + ){ + + int iCookie; /* Cookie index. 0 for schema-cookie, 6 for user-cookie. 
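Editor's note: as the comment above explains, PRAGMA encoding only has an effect while the main database is still uninitialized; once the file exists its text encoding is fixed, and ATTACHed databases inherit it. A short sketch (in-memory database, so the schema is created on first use) that sets the encoding before creating any objects and reads it back; it should report UTF-16le because the setting preceded initialization:

#include <stdio.h>
#include <sqlite3.h>

static int print_cb(void *arg, int nCol, char **azVal, char **azCol){
  (void)arg; (void)nCol; (void)azCol;
  printf("encoding = %s\n", azVal[0] ? azVal[0] : "NULL");
  return 0;
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;

  /* Must happen before the schema is created; afterwards it is a no-op. */
  sqlite3_exec(db, "PRAGMA encoding='UTF-16le';", 0, 0, 0);
  sqlite3_exec(db, "CREATE TABLE t(x);", 0, 0, 0);

  sqlite3_exec(db, "PRAGMA encoding;", print_cb, 0, 0);

  sqlite3_close(db);
  return 0;
}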
*/ + sqlite3VdbeUsesBtree(v, iDb); + switch( zLeft[0] ){ + case 's': case 'S': + iCookie = 0; + break; + case 'f': case 'F': + iCookie = 1; + iDb = (-1*(iDb+1)); + assert(iDb<=0); + break; + default: + iCookie = 5; + break; + } + + if( zRight && iDb>=0 ){ + /* Write the specified cookie value */ + static const VdbeOpList setCookie[] = { + { OP_Transaction, 0, 1, 0}, /* 0 */ + { OP_Integer, 0, 0, 0}, /* 1 */ + { OP_SetCookie, 0, 0, 0}, /* 2 */ + }; + int addr = sqlite3VdbeAddOpList(v, ArraySize(setCookie), setCookie); + sqlite3VdbeChangeP1(v, addr, iDb); + sqlite3VdbeChangeP1(v, addr+1, atoi(zRight)); + sqlite3VdbeChangeP1(v, addr+2, iDb); + sqlite3VdbeChangeP2(v, addr+2, iCookie); + }else{ + /* Read the specified cookie value */ + static const VdbeOpList readCookie[] = { + { OP_ReadCookie, 0, 0, 0}, /* 0 */ + { OP_Callback, 1, 0, 0} + }; + int addr = sqlite3VdbeAddOpList(v, ArraySize(readCookie), readCookie); + sqlite3VdbeChangeP1(v, addr, iDb); + sqlite3VdbeChangeP2(v, addr, iCookie); + sqlite3VdbeSetNumCols(v, 1); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, zLeft, P3_TRANSIENT); + } + }else +#endif /* SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS */ + +#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) + /* + ** Report the current state of file logs for all databases + */ + if( sqlite3StrICmp(zLeft, "lock_status")==0 ){ + static const char *const azLockName[] = { + "unlocked", "shared", "reserved", "pending", "exclusive" + }; + int i; + Vdbe *v = sqlite3GetVdbe(pParse); + sqlite3VdbeSetNumCols(v, 2); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "database", P3_STATIC); + sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "status", P3_STATIC); + for(i=0; inDb; i++){ + Btree *pBt; + Pager *pPager; + const char *zState = "unknown"; + int j; + if( db->aDb[i].zName==0 ) continue; + sqlite3VdbeOp3(v, OP_String8, 0, 0, db->aDb[i].zName, P3_STATIC); + pBt = db->aDb[i].pBt; + if( pBt==0 || (pPager = sqlite3BtreePager(pBt))==0 ){ + zState = "closed"; + }else if( sqlite3_file_control(db, db->aDb[i].zName, + SQLITE_FCNTL_LOCKSTATE, &j)==SQLITE_OK ){ + zState = azLockName[j]; + } + sqlite3VdbeOp3(v, OP_String8, 0, 0, zState, P3_STATIC); + sqlite3VdbeAddOp(v, OP_Callback, 2, 0); + } + }else +#endif + +#ifdef SQLITE_SSE + /* + ** Check to see if the sqlite_statements table exists. Create it + ** if it does not. + */ + if( sqlite3StrICmp(zLeft, "create_sqlite_statement_table")==0 ){ + extern int sqlite3CreateStatementsTable(Parse*); + sqlite3CreateStatementsTable(pParse); + }else +#endif + +#if SQLITE_HAS_CODEC + if( sqlite3StrICmp(zLeft, "key")==0 ){ + sqlite3_key(db, zRight, strlen(zRight)); + }else +#endif +#if SQLITE_HAS_CODEC || defined(SQLITE_ENABLE_CEROD) + if( sqlite3StrICmp(zLeft, "activate_extensions")==0 ){ +#if SQLITE_HAS_CODEC + if( sqlite3StrNICmp(zRight, "see-", 4)==0 ){ + extern void sqlite3_activate_see(const char*); + sqlite3_activate_see(&zRight[4]); + } +#endif +#ifdef SQLITE_ENABLE_CEROD + if( sqlite3StrNICmp(zRight, "cerod-", 6)==0 ){ + extern void sqlite3_activate_cerod(const char*); + sqlite3_activate_cerod(&zRight[6]); + } +#endif + } +#endif + + {} + + if( v ){ + /* Code an OP_Expire at the end of each PRAGMA program to cause + ** the VDBE implementing the pragma to expire. Most (all?) pragmas + ** are only valid for a single execution. + */ + sqlite3VdbeAddOp(v, OP_Expire, 1, 0); + + /* + ** Reset the safety level, in case the fullfsync flag or synchronous + ** setting changed. 
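Editor's note: schema_version and user_version above are just 32-bit cookies in the database header; SQLite bumps the schema cookie itself on DDL, while user_version is left entirely to the application (a common use is recording an application-level migration number). A sketch of reading and writing user_version (the value 7 is arbitrary):

#include <stdio.h>
#include <sqlite3.h>

static int read_user_version(sqlite3 *db){
  sqlite3_stmt *pStmt;
  int v = 0;
  if( sqlite3_prepare_v2(db, "PRAGMA user_version;", -1, &pStmt, 0)==SQLITE_OK ){
    if( sqlite3_step(pStmt)==SQLITE_ROW ) v = sqlite3_column_int(pStmt, 0);
    sqlite3_finalize(pStmt);
  }
  return v;
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;

  printf("user_version before: %d\n", read_user_version(db));  /* 0 */
  sqlite3_exec(db, "PRAGMA user_version=7;", 0, 0, 0);          /* write the cookie */
  printf("user_version after:  %d\n", read_user_version(db));  /* 7 */

  sqlite3_close(db);
  return 0;
}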
+ */ +#ifndef SQLITE_OMIT_PAGER_PRAGMAS + if( db->autoCommit ){ + sqlite3BtreeSetSafetyLevel(pDb->pBt, pDb->safety_level, + (db->flags&SQLITE_FullFSync)!=0); + } +#endif + } +pragma_out: + sqlite3_free(zLeft); + sqlite3_free(zRight); +} + +#endif /* SQLITE_OMIT_PRAGMA || SQLITE_OMIT_PARSER */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/prepare.c b/libraries/sqlite/unix/sqlite-3.5.1/src/prepare.c new file mode 100644 index 0000000000..a1a0a40537 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/prepare.c @@ -0,0 +1,742 @@ +/* +** 2005 May 25 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the implementation of the sqlite3_prepare() +** interface, and routines that contribute to loading the database schema +** from disk. +** +** $Id: prepare.c,v 1.61 2007/10/03 08:46:45 danielk1977 Exp $ +*/ +#include "sqliteInt.h" +#include + +/* +** Fill the InitData structure with an error message that indicates +** that the database is corrupt. +*/ +static void corruptSchema(InitData *pData, const char *zExtra){ + if( !pData->db->mallocFailed ){ + sqlite3SetString(pData->pzErrMsg, "malformed database schema", + zExtra!=0 && zExtra[0]!=0 ? " - " : (char*)0, zExtra, (char*)0); + } + pData->rc = SQLITE_CORRUPT; +} + +/* +** This is the callback routine for the code that initializes the +** database. See sqlite3Init() below for additional information. +** This routine is also called from the OP_ParseSchema opcode of the VDBE. +** +** Each callback contains the following information: +** +** argv[0] = name of thing being created +** argv[1] = root page number for table or index. 0 for trigger or view. +** argv[2] = SQL text for the CREATE statement. +** +*/ +int sqlite3InitCallback(void *pInit, int argc, char **argv, char **azColName){ + InitData *pData = (InitData*)pInit; + sqlite3 *db = pData->db; + int iDb = pData->iDb; + + assert( sqlite3_mutex_held(db->mutex) ); + pData->rc = SQLITE_OK; + DbClearProperty(db, iDb, DB_Empty); + if( db->mallocFailed ){ + corruptSchema(pData, 0); + return SQLITE_NOMEM; + } + + assert( argc==3 ); + if( argv==0 ) return 0; /* Might happen if EMPTY_RESULT_CALLBACKS are on */ + if( argv[1]==0 ){ + corruptSchema(pData, 0); + return 1; + } + assert( iDb>=0 && iDbnDb ); + if( argv[2] && argv[2][0] ){ + /* Call the parser to process a CREATE TABLE, INDEX or VIEW. + ** But because db->init.busy is set to 1, no VDBE code is generated + ** or executed. All the parser does is build the internal data + ** structures that describe the table, index, or view. + */ + char *zErr; + int rc; + assert( db->init.busy ); + db->init.iDb = iDb; + db->init.newTnum = atoi(argv[1]); + rc = sqlite3_exec(db, argv[2], 0, 0, &zErr); + db->init.iDb = 0; + assert( rc!=SQLITE_OK || zErr==0 ); + if( SQLITE_OK!=rc ){ + pData->rc = rc; + if( rc==SQLITE_NOMEM ){ + db->mallocFailed = 1; + }else if( rc!=SQLITE_INTERRUPT ){ + corruptSchema(pData, zErr); + } + sqlite3_free(zErr); + return 1; + } + }else{ + /* If the SQL column is blank it means this is an index that + ** was created to be the PRIMARY KEY or to fulfill a UNIQUE + ** constraint for a CREATE TABLE. The index should have already + ** been created when we processed the CREATE TABLE. 
All we have + ** to do here is record the root page number for that index. + */ + Index *pIndex; + pIndex = sqlite3FindIndex(db, argv[0], db->aDb[iDb].zName); + if( pIndex==0 || pIndex->tnum!=0 ){ + /* This can occur if there exists an index on a TEMP table which + ** has the same name as another index on a permanent index. Since + ** the permanent table is hidden by the TEMP table, we can also + ** safely ignore the index on the permanent table. + */ + /* Do Nothing */; + }else{ + pIndex->tnum = atoi(argv[1]); + } + } + return 0; +} + +/* +** Attempt to read the database schema and initialize internal +** data structures for a single database file. The index of the +** database file is given by iDb. iDb==0 is used for the main +** database. iDb==1 should never be used. iDb>=2 is used for +** auxiliary databases. Return one of the SQLITE_ error codes to +** indicate success or failure. +*/ +static int sqlite3InitOne(sqlite3 *db, int iDb, char **pzErrMsg){ + int rc; + BtCursor *curMain; + int size; + Table *pTab; + Db *pDb; + char const *azArg[4]; + int meta[10]; + InitData initData; + char const *zMasterSchema; + char const *zMasterName = SCHEMA_TABLE(iDb); + + /* + ** The master database table has a structure like this + */ + static const char master_schema[] = + "CREATE TABLE sqlite_master(\n" + " type text,\n" + " name text,\n" + " tbl_name text,\n" + " rootpage integer,\n" + " sql text\n" + ")" + ; +#ifndef SQLITE_OMIT_TEMPDB + static const char temp_master_schema[] = + "CREATE TEMP TABLE sqlite_temp_master(\n" + " type text,\n" + " name text,\n" + " tbl_name text,\n" + " rootpage integer,\n" + " sql text\n" + ")" + ; +#else + #define temp_master_schema 0 +#endif + + assert( iDb>=0 && iDbnDb ); + assert( db->aDb[iDb].pSchema ); + assert( sqlite3_mutex_held(db->mutex) ); + + /* zMasterSchema and zInitScript are set to point at the master schema + ** and initialisation script appropriate for the database being + ** initialised. zMasterName is the name of the master table. + */ + if( !OMIT_TEMPDB && iDb==1 ){ + zMasterSchema = temp_master_schema; + }else{ + zMasterSchema = master_schema; + } + zMasterName = SCHEMA_TABLE(iDb); + + /* Construct the schema tables. */ + sqlite3SafetyOff(db); + azArg[0] = zMasterName; + azArg[1] = "1"; + azArg[2] = zMasterSchema; + azArg[3] = 0; + initData.db = db; + initData.iDb = iDb; + initData.pzErrMsg = pzErrMsg; + rc = sqlite3InitCallback(&initData, 3, (char **)azArg, 0); + if( rc ){ + sqlite3SafetyOn(db); + rc = initData.rc; + goto error_out; + } + pTab = sqlite3FindTable(db, zMasterName, db->aDb[iDb].zName); + if( pTab ){ + pTab->readOnly = 1; + } + sqlite3SafetyOn(db); + + /* Create a cursor to hold the database open + */ + pDb = &db->aDb[iDb]; + if( pDb->pBt==0 ){ + if( !OMIT_TEMPDB && iDb==1 ){ + DbSetProperty(db, 1, DB_SchemaLoaded); + } + return SQLITE_OK; + } + sqlite3BtreeEnter(pDb->pBt); + rc = sqlite3BtreeCursor(pDb->pBt, MASTER_ROOT, 0, 0, 0, &curMain); + if( rc!=SQLITE_OK && rc!=SQLITE_EMPTY ){ + sqlite3SetString(pzErrMsg, sqlite3ErrStr(rc), (char*)0); + sqlite3BtreeLeave(pDb->pBt); + goto error_out; + } + + /* Get the database meta information. + ** + ** Meta values are as follows: + ** meta[0] Schema cookie. Changes with each schema change. + ** meta[1] File format of schema layer. + ** meta[2] Size of the page cache. + ** meta[3] Use freelist if 0. Autovacuum if greater than zero. + ** meta[4] Db text encoding. 1:UTF-8 2:UTF-16LE 3:UTF-16BE + ** meta[5] The user cookie. Used by the application. + ** meta[6] Incremental-vacuum flag. 
+ ** meta[7] + ** meta[8] + ** meta[9] + ** + ** Note: The #defined SQLITE_UTF* symbols in sqliteInt.h correspond to + ** the possible values of meta[4]. + */ + if( rc==SQLITE_OK ){ + int i; + for(i=0; rc==SQLITE_OK && ipBt, i+1, (u32 *)&meta[i]); + } + if( rc ){ + sqlite3SetString(pzErrMsg, sqlite3ErrStr(rc), (char*)0); + sqlite3BtreeCloseCursor(curMain); + sqlite3BtreeLeave(pDb->pBt); + goto error_out; + } + }else{ + memset(meta, 0, sizeof(meta)); + } + pDb->pSchema->schema_cookie = meta[0]; + + /* If opening a non-empty database, check the text encoding. For the + ** main database, set sqlite3.enc to the encoding of the main database. + ** For an attached db, it is an error if the encoding is not the same + ** as sqlite3.enc. + */ + if( meta[4] ){ /* text encoding */ + if( iDb==0 ){ + /* If opening the main database, set ENC(db). */ + ENC(db) = (u8)meta[4]; + db->pDfltColl = sqlite3FindCollSeq(db, SQLITE_UTF8, "BINARY", 6, 0); + }else{ + /* If opening an attached database, the encoding much match ENC(db) */ + if( meta[4]!=ENC(db) ){ + sqlite3BtreeCloseCursor(curMain); + sqlite3SetString(pzErrMsg, "attached databases must use the same" + " text encoding as main database", (char*)0); + sqlite3BtreeLeave(pDb->pBt); + return SQLITE_ERROR; + } + } + }else{ + DbSetProperty(db, iDb, DB_Empty); + } + pDb->pSchema->enc = ENC(db); + + size = meta[2]; + if( size==0 ){ size = SQLITE_DEFAULT_CACHE_SIZE; } + pDb->pSchema->cache_size = size; + sqlite3BtreeSetCacheSize(pDb->pBt, pDb->pSchema->cache_size); + + /* + ** file_format==1 Version 3.0.0. + ** file_format==2 Version 3.1.3. // ALTER TABLE ADD COLUMN + ** file_format==3 Version 3.1.4. // ditto but with non-NULL defaults + ** file_format==4 Version 3.3.0. // DESC indices. Boolean constants + */ + pDb->pSchema->file_format = meta[1]; + if( pDb->pSchema->file_format==0 ){ + pDb->pSchema->file_format = 1; + } + if( pDb->pSchema->file_format>SQLITE_MAX_FILE_FORMAT ){ + sqlite3BtreeCloseCursor(curMain); + sqlite3SetString(pzErrMsg, "unsupported file format", (char*)0); + sqlite3BtreeLeave(pDb->pBt); + return SQLITE_ERROR; + } + + + /* Read the schema information out of the schema tables + */ + assert( db->init.busy ); + if( rc==SQLITE_EMPTY ){ + /* For an empty database, there is nothing to read */ + rc = SQLITE_OK; + }else{ + char *zSql; + zSql = sqlite3MPrintf(db, + "SELECT name, rootpage, sql FROM '%q'.%s", + db->aDb[iDb].zName, zMasterName); + sqlite3SafetyOff(db); + rc = sqlite3_exec(db, zSql, sqlite3InitCallback, &initData, 0); + if( rc==SQLITE_ABORT ) rc = initData.rc; + sqlite3SafetyOn(db); + sqlite3_free(zSql); +#ifndef SQLITE_OMIT_ANALYZE + if( rc==SQLITE_OK ){ + sqlite3AnalysisLoad(db, iDb); + } +#endif + sqlite3BtreeCloseCursor(curMain); + } + if( db->mallocFailed ){ + /* sqlite3SetString(pzErrMsg, "out of memory", (char*)0); */ + rc = SQLITE_NOMEM; + sqlite3ResetInternalSchema(db, 0); + } + if( rc==SQLITE_OK || (db->flags&SQLITE_RecoveryMode)){ + /* Black magic: If the SQLITE_RecoveryMode flag is set, then consider + ** the schema loaded, even if errors occured. In this situation the + ** current sqlite3_prepare() operation will fail, but the following one + ** will attempt to compile the supplied statement against whatever subset + ** of the schema was loaded before the error occured. The primary + ** purpose of this is to allow access to the sqlite_master table + ** even when it's contents have been corrupted. 
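Editor's note: sqlite3InitOne above loads the schema by running an ordinary "SELECT name, rootpage, sql FROM sqlite_master" with sqlite3InitCallback as the row callback. The same table is visible to applications, which is occasionally handy for introspection. A sketch using only the public API (the table and index names are example values) that lists every object and its CREATE statement:

#include <stdio.h>
#include <sqlite3.h>

static int print_row(void *arg, int nCol, char **azVal, char **azCol){
  (void)arg; (void)azCol;
  printf("%-10s %-10s %s\n",
         azVal[0] ? azVal[0] : "NULL",          /* type */
         azVal[1] ? azVal[1] : "NULL",          /* name */
         nCol>2 && azVal[2] ? azVal[2] : "");   /* sql  */
  return 0;
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;

  sqlite3_exec(db,
    "CREATE TABLE t(a INTEGER PRIMARY KEY, b TEXT);"
    "CREATE INDEX i_b ON t(b);", 0, 0, 0);

  /* The schema table has the shape shown in master_schema above. */
  sqlite3_exec(db, "SELECT type, name, sql FROM sqlite_master;", print_row, 0, 0);

  sqlite3_close(db);
  return 0;
}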
+ */ + DbSetProperty(db, iDb, DB_SchemaLoaded); + rc = SQLITE_OK; + } + sqlite3BtreeLeave(pDb->pBt); + +error_out: + if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ){ + db->mallocFailed = 1; + } + return rc; +} + +/* +** Initialize all database files - the main database file, the file +** used to store temporary tables, and any additional database files +** created using ATTACH statements. Return a success code. If an +** error occurs, write an error message into *pzErrMsg. +** +** After a database is initialized, the DB_SchemaLoaded bit is set +** bit is set in the flags field of the Db structure. If the database +** file was of zero-length, then the DB_Empty flag is also set. +*/ +int sqlite3Init(sqlite3 *db, char **pzErrMsg){ + int i, rc; + int commit_internal = !(db->flags&SQLITE_InternChanges); + + assert( sqlite3_mutex_held(db->mutex) ); + if( db->init.busy ) return SQLITE_OK; + rc = SQLITE_OK; + db->init.busy = 1; + for(i=0; rc==SQLITE_OK && inDb; i++){ + if( DbHasProperty(db, i, DB_SchemaLoaded) || i==1 ) continue; + rc = sqlite3InitOne(db, i, pzErrMsg); + if( rc ){ + sqlite3ResetInternalSchema(db, i); + } + } + + /* Once all the other databases have been initialised, load the schema + ** for the TEMP database. This is loaded last, as the TEMP database + ** schema may contain references to objects in other databases. + */ +#ifndef SQLITE_OMIT_TEMPDB + if( rc==SQLITE_OK && db->nDb>1 && !DbHasProperty(db, 1, DB_SchemaLoaded) ){ + rc = sqlite3InitOne(db, 1, pzErrMsg); + if( rc ){ + sqlite3ResetInternalSchema(db, 1); + } + } +#endif + + db->init.busy = 0; + if( rc==SQLITE_OK && commit_internal ){ + sqlite3CommitInternalChanges(db); + } + + return rc; +} + +/* +** This routine is a no-op if the database schema is already initialised. +** Otherwise, the schema is loaded. An error code is returned. +*/ +int sqlite3ReadSchema(Parse *pParse){ + int rc = SQLITE_OK; + sqlite3 *db = pParse->db; + assert( sqlite3_mutex_held(db->mutex) ); + if( !db->init.busy ){ + rc = sqlite3Init(db, &pParse->zErrMsg); + } + if( rc!=SQLITE_OK ){ + pParse->rc = rc; + pParse->nErr++; + } + return rc; +} + + +/* +** Check schema cookies in all databases. If any cookie is out +** of date, return 0. If all schema cookies are current, return 1. +*/ +static int schemaIsValid(sqlite3 *db){ + int iDb; + int rc; + BtCursor *curTemp; + int cookie; + int allOk = 1; + + assert( sqlite3_mutex_held(db->mutex) ); + for(iDb=0; allOk && iDbnDb; iDb++){ + Btree *pBt; + pBt = db->aDb[iDb].pBt; + if( pBt==0 ) continue; + rc = sqlite3BtreeCursor(pBt, MASTER_ROOT, 0, 0, 0, &curTemp); + if( rc==SQLITE_OK ){ + rc = sqlite3BtreeGetMeta(pBt, 1, (u32 *)&cookie); + if( rc==SQLITE_OK && cookie!=db->aDb[iDb].pSchema->schema_cookie ){ + allOk = 0; + } + sqlite3BtreeCloseCursor(curTemp); + } + if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ){ + db->mallocFailed = 1; + } + } + return allOk; +} + +/* +** Convert a schema pointer into the iDb index that indicates +** which database file in db->aDb[] the schema refers to. +** +** If the same database is attached more than once, the first +** attached database is returned. +*/ +int sqlite3SchemaToIndex(sqlite3 *db, Schema *pSchema){ + int i = -1000000; + + /* If pSchema is NULL, then return -1000000. This happens when code in + ** expr.c is trying to resolve a reference to a transient table (i.e. one + ** created by a sub-select). In this case the return value of this + ** function should never be used. 
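An illustrative aside, not part of the vendored source: schemaIsValid() above compares the cached schema cookie (meta[0]) against the value currently stored in the file. The same counter is visible from application code as PRAGMA schema_version, and the user cookie (meta[5]) as PRAGMA user_version. A minimal sketch, assuming db is an open handle:

#include <stdio.h>
#include "sqlite3.h"

static int print_cookies(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db, "PRAGMA schema_version", -1, &pStmt, 0);
  if( rc==SQLITE_OK && sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("schema cookie: %d\n", sqlite3_column_int(pStmt, 0));  /* meta[0] */
  }
  sqlite3_finalize(pStmt);
  rc = sqlite3_prepare_v2(db, "PRAGMA user_version", -1, &pStmt, 0);
  if( rc==SQLITE_OK && sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("user cookie:   %d\n", sqlite3_column_int(pStmt, 0));  /* meta[5] */
  }
  sqlite3_finalize(pStmt);
  return rc;
}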
+ ** + ** We return -1000000 instead of the more usual -1 simply because using + ** -1000000 as incorrectly using -1000000 index into db->aDb[] is much + ** more likely to cause a segfault than -1 (of course there are assert() + ** statements too, but it never hurts to play the odds). + */ + assert( sqlite3_mutex_held(db->mutex) ); + if( pSchema ){ + for(i=0; inDb; i++){ + if( db->aDb[i].pSchema==pSchema ){ + break; + } + } + assert( i>=0 &&i>=0 && inDb ); + } + return i; +} + +/* +** Compile the UTF-8 encoded SQL statement zSql into a statement handle. +*/ +int sqlite3Prepare( + sqlite3 *db, /* Database handle. */ + const char *zSql, /* UTF-8 encoded SQL statement. */ + int nBytes, /* Length of zSql in bytes. */ + int saveSqlFlag, /* True to copy SQL text into the sqlite3_stmt */ + sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ + const char **pzTail /* OUT: End of parsed string */ +){ + Parse sParse; + char *zErrMsg = 0; + int rc = SQLITE_OK; + int i; + + assert( ppStmt ); + *ppStmt = 0; + if( sqlite3SafetyOn(db) ){ + return SQLITE_MISUSE; + } + assert( !db->mallocFailed ); + assert( sqlite3_mutex_held(db->mutex) ); + + /* If any attached database schemas are locked, do not proceed with + ** compilation. Instead return SQLITE_LOCKED immediately. + */ + for(i=0; inDb; i++) { + Btree *pBt = db->aDb[i].pBt; + if( pBt ){ + int rc; + rc = sqlite3BtreeSchemaLocked(pBt); + if( rc ){ + const char *zDb = db->aDb[i].zName; + sqlite3Error(db, SQLITE_LOCKED, "database schema is locked: %s", zDb); + sqlite3SafetyOff(db); + return SQLITE_LOCKED; + } + } + } + + memset(&sParse, 0, sizeof(sParse)); + sParse.db = db; + if( nBytes>=0 && zSql[nBytes]!=0 ){ + char *zSqlCopy; + if( nBytes>SQLITE_MAX_SQL_LENGTH ){ + return SQLITE_TOOBIG; + } + zSqlCopy = sqlite3DbStrNDup(db, zSql, nBytes); + if( zSqlCopy ){ + sqlite3RunParser(&sParse, zSqlCopy, &zErrMsg); + sqlite3_free(zSqlCopy); + } + sParse.zTail = &zSql[nBytes]; + }else{ + sqlite3RunParser(&sParse, zSql, &zErrMsg); + } + + if( db->mallocFailed ){ + sParse.rc = SQLITE_NOMEM; + } + if( sParse.rc==SQLITE_DONE ) sParse.rc = SQLITE_OK; + if( sParse.checkSchema && !schemaIsValid(db) ){ + sParse.rc = SQLITE_SCHEMA; + } + if( sParse.rc==SQLITE_SCHEMA ){ + sqlite3ResetInternalSchema(db, 0); + } + if( db->mallocFailed ){ + sParse.rc = SQLITE_NOMEM; + } + if( pzTail ){ + *pzTail = sParse.zTail; + } + rc = sParse.rc; + +#ifndef SQLITE_OMIT_EXPLAIN + if( rc==SQLITE_OK && sParse.pVdbe && sParse.explain ){ + if( sParse.explain==2 ){ + sqlite3VdbeSetNumCols(sParse.pVdbe, 3); + sqlite3VdbeSetColName(sParse.pVdbe, 0, COLNAME_NAME, "order", P3_STATIC); + sqlite3VdbeSetColName(sParse.pVdbe, 1, COLNAME_NAME, "from", P3_STATIC); + sqlite3VdbeSetColName(sParse.pVdbe, 2, COLNAME_NAME, "detail", P3_STATIC); + }else{ + sqlite3VdbeSetNumCols(sParse.pVdbe, 5); + sqlite3VdbeSetColName(sParse.pVdbe, 0, COLNAME_NAME, "addr", P3_STATIC); + sqlite3VdbeSetColName(sParse.pVdbe, 1, COLNAME_NAME, "opcode", P3_STATIC); + sqlite3VdbeSetColName(sParse.pVdbe, 2, COLNAME_NAME, "p1", P3_STATIC); + sqlite3VdbeSetColName(sParse.pVdbe, 3, COLNAME_NAME, "p2", P3_STATIC); + sqlite3VdbeSetColName(sParse.pVdbe, 4, COLNAME_NAME, "p3", P3_STATIC); + } + } +#endif + + if( sqlite3SafetyOff(db) ){ + rc = SQLITE_MISUSE; + } + + if( saveSqlFlag ){ + sqlite3VdbeSetSql(sParse.pVdbe, zSql, sParse.zTail - zSql); + } + if( rc!=SQLITE_OK || db->mallocFailed ){ + sqlite3_finalize((sqlite3_stmt*)sParse.pVdbe); + assert(!(*ppStmt)); + }else{ + *ppStmt = (sqlite3_stmt*)sParse.pVdbe; + } + + if( 
zErrMsg ){ + sqlite3Error(db, rc, "%s", zErrMsg); + sqlite3_free(zErrMsg); + }else{ + sqlite3Error(db, rc, 0); + } + + rc = sqlite3ApiExit(db, rc); + /* sqlite3ReleaseThreadData(); */ + assert( (rc&db->errMask)==rc ); + return rc; +} +static int sqlite3LockAndPrepare( + sqlite3 *db, /* Database handle. */ + const char *zSql, /* UTF-8 encoded SQL statement. */ + int nBytes, /* Length of zSql in bytes. */ + int saveSqlFlag, /* True to copy SQL text into the sqlite3_stmt */ + sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ + const char **pzTail /* OUT: End of parsed string */ +){ + int rc; + if( sqlite3SafetyCheck(db) ){ + return SQLITE_MISUSE; + } + sqlite3_mutex_enter(db->mutex); + sqlite3BtreeEnterAll(db); + rc = sqlite3Prepare(db, zSql, nBytes, saveSqlFlag, ppStmt, pzTail); + sqlite3BtreeLeaveAll(db); + sqlite3_mutex_leave(db->mutex); + return rc; +} + +/* +** Rerun the compilation of a statement after a schema change. +** Return true if the statement was recompiled successfully. +** Return false if there is an error of some kind. +*/ +int sqlite3Reprepare(Vdbe *p){ + int rc; + sqlite3_stmt *pNew; + const char *zSql; + sqlite3 *db; + + assert( sqlite3_mutex_held(sqlite3VdbeDb(p)->mutex) ); + zSql = sqlite3VdbeGetSql(p); + if( zSql==0 ){ + return 0; + } + db = sqlite3VdbeDb(p); + assert( sqlite3_mutex_held(db->mutex) ); + rc = sqlite3LockAndPrepare(db, zSql, -1, 0, &pNew, 0); + if( rc ){ + assert( pNew==0 ); + return 0; + }else{ + assert( pNew!=0 ); + } + sqlite3VdbeSwap((Vdbe*)pNew, p); + sqlite3_transfer_bindings(pNew, (sqlite3_stmt*)p); + sqlite3VdbeResetStepResult((Vdbe*)pNew); + sqlite3VdbeFinalize((Vdbe*)pNew); + return 1; +} + + +/* +** Two versions of the official API. Legacy and new use. In the legacy +** version, the original SQL text is not saved in the prepared statement +** and so if a schema change occurs, SQLITE_SCHEMA is returned by +** sqlite3_step(). In the new version, the original SQL text is retained +** and the statement is automatically recompiled if an schema change +** occurs. +*/ +int sqlite3_prepare( + sqlite3 *db, /* Database handle. */ + const char *zSql, /* UTF-8 encoded SQL statement. */ + int nBytes, /* Length of zSql in bytes. */ + sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ + const char **pzTail /* OUT: End of parsed string */ +){ + return sqlite3LockAndPrepare(db,zSql,nBytes,0,ppStmt,pzTail); +} +int sqlite3_prepare_v2( + sqlite3 *db, /* Database handle. */ + const char *zSql, /* UTF-8 encoded SQL statement. */ + int nBytes, /* Length of zSql in bytes. */ + sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ + const char **pzTail /* OUT: End of parsed string */ +){ + return sqlite3LockAndPrepare(db,zSql,nBytes,1,ppStmt,pzTail); +} + + +#ifndef SQLITE_OMIT_UTF16 +/* +** Compile the UTF-16 encoded SQL statement zSql into a statement handle. +*/ +static int sqlite3Prepare16( + sqlite3 *db, /* Database handle. */ + const void *zSql, /* UTF-8 encoded SQL statement. */ + int nBytes, /* Length of zSql in bytes. */ + int saveSqlFlag, /* True to save SQL text into the sqlite3_stmt */ + sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ + const void **pzTail /* OUT: End of parsed string */ +){ + /* This function currently works by first transforming the UTF-16 + ** encoded string to UTF-8, then invoking sqlite3_prepare(). The + ** tricky bit is figuring out the pointer to return in *pzTail. 
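An illustrative aside, not part of the vendored source: the practical difference between the two prepare variants shows up when the schema cookie changes under a statement. With the legacy sqlite3_prepare() the caller owns the SQLITE_SCHEMA retry; sqlite3_prepare_v2() passes saveSqlFlag=1 so sqlite3Reprepare() above can redo that work automatically. A sketch of the legacy-style loop, with error handling trimmed for brevity:

#include "sqlite3.h"

static int step_with_retry(sqlite3 *db, const char *zSql){
  int rc;
  do{
    sqlite3_stmt *pStmt = 0;
    rc = sqlite3_prepare(db, zSql, -1, &pStmt, 0);   /* legacy variant */
    if( rc!=SQLITE_OK ) return rc;
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      /* ... consume the row ... */
    }
    rc = sqlite3_finalize(pStmt);  /* reports SQLITE_SCHEMA if the schema
                                   ** changed after the statement was built */
  }while( rc==SQLITE_SCHEMA );
  return rc;
}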
+ */ + char *zSql8; + const char *zTail8 = 0; + int rc = SQLITE_OK; + + if( sqlite3SafetyCheck(db) ){ + return SQLITE_MISUSE; + } + sqlite3_mutex_enter(db->mutex); + zSql8 = sqlite3Utf16to8(db, zSql, nBytes); + if( zSql8 ){ + rc = sqlite3LockAndPrepare(db, zSql8, -1, saveSqlFlag, ppStmt, &zTail8); + } + + if( zTail8 && pzTail ){ + /* If sqlite3_prepare returns a tail pointer, we calculate the + ** equivalent pointer into the UTF-16 string by counting the unicode + ** characters between zSql8 and zTail8, and then returning a pointer + ** the same number of characters into the UTF-16 string. + */ + int chars_parsed = sqlite3Utf8CharLen(zSql8, zTail8-zSql8); + *pzTail = (u8 *)zSql + sqlite3Utf16ByteLen(zSql, chars_parsed); + } + sqlite3_free(zSql8); + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; +} + +/* +** Two versions of the official API. Legacy and new use. In the legacy +** version, the original SQL text is not saved in the prepared statement +** and so if a schema change occurs, SQLITE_SCHEMA is returned by +** sqlite3_step(). In the new version, the original SQL text is retained +** and the statement is automatically recompiled if an schema change +** occurs. +*/ +int sqlite3_prepare16( + sqlite3 *db, /* Database handle. */ + const void *zSql, /* UTF-8 encoded SQL statement. */ + int nBytes, /* Length of zSql in bytes. */ + sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ + const void **pzTail /* OUT: End of parsed string */ +){ + return sqlite3Prepare16(db,zSql,nBytes,0,ppStmt,pzTail); +} +int sqlite3_prepare16_v2( + sqlite3 *db, /* Database handle. */ + const void *zSql, /* UTF-8 encoded SQL statement. */ + int nBytes, /* Length of zSql in bytes. */ + sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ + const void **pzTail /* OUT: End of parsed string */ +){ + return sqlite3Prepare16(db,zSql,nBytes,1,ppStmt,pzTail); +} + +#endif /* SQLITE_OMIT_UTF16 */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/printf.c b/libraries/sqlite/unix/sqlite-3.5.1/src/printf.c new file mode 100644 index 0000000000..bea91e211d --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/printf.c @@ -0,0 +1,907 @@ +/* +** The "printf" code that follows dates from the 1980's. It is in +** the public domain. The original comments are included here for +** completeness. They are very out-of-date but might be useful as +** an historical reference. Most of the "enhancements" have been backed +** out so that the functionality is now the same as standard printf(). +** +************************************************************************** +** +** The following modules is an enhanced replacement for the "printf" subroutines +** found in the standard C library. The following enhancements are +** supported: +** +** + Additional functions. The standard set of "printf" functions +** includes printf, fprintf, sprintf, vprintf, vfprintf, and +** vsprintf. This module adds the following: +** +** * snprintf -- Works like sprintf, but has an extra argument +** which is the size of the buffer written to. +** +** * mprintf -- Similar to sprintf. Writes output to memory +** obtained from malloc. +** +** * xprintf -- Calls a function to dispose of output. +** +** * nprintf -- No output, but returns the number of characters +** that would have been output by printf. +** +** * A v- version (ex: vsnprintf) of every function is also +** supplied. 
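An illustrative aside, not part of the vendored source: the pzTail out-parameter that sqlite3Prepare16() goes to some trouble to compute is what lets a caller walk a script containing several SQL statements. A minimal sketch using the UTF-8 interface, assuming db is an open handle:

#include "sqlite3.h"

static int run_script(sqlite3 *db, const char *zSql){
  int rc = SQLITE_OK;
  while( rc==SQLITE_OK && zSql && zSql[0] ){
    sqlite3_stmt *pStmt = 0;
    const char *zTail = 0;
    rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, &zTail);
    if( rc!=SQLITE_OK ) break;
    if( pStmt ){                    /* pStmt is NULL for pure whitespace/comments */
      while( sqlite3_step(pStmt)==SQLITE_ROW ){}
      rc = sqlite3_finalize(pStmt);
    }
    zSql = zTail;                   /* advance to the next statement */
  }
  return rc;
}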
+** +** + A few extensions to the formatting notation are supported: +** +** * The "=" flag (similar to "-") causes the output to be +** be centered in the appropriately sized field. +** +** * The %b field outputs an integer in binary notation. +** +** * The %c field now accepts a precision. The character output +** is repeated by the number of times the precision specifies. +** +** * The %' field works like %c, but takes as its character the +** next character of the format string, instead of the next +** argument. For example, printf("%.78'-") prints 78 minus +** signs, the same as printf("%.78c",'-'). +** +** + When compiled using GCC on a SPARC, this version of printf is +** faster than the library printf for SUN OS 4.1. +** +** + All functions are fully reentrant. +** +*/ +#include "sqliteInt.h" +#include + +/* +** Conversion types fall into various categories as defined by the +** following enumeration. +*/ +#define etRADIX 1 /* Integer types. %d, %x, %o, and so forth */ +#define etFLOAT 2 /* Floating point. %f */ +#define etEXP 3 /* Exponentional notation. %e and %E */ +#define etGENERIC 4 /* Floating or exponential, depending on exponent. %g */ +#define etSIZE 5 /* Return number of characters processed so far. %n */ +#define etSTRING 6 /* Strings. %s */ +#define etDYNSTRING 7 /* Dynamically allocated strings. %z */ +#define etPERCENT 8 /* Percent symbol. %% */ +#define etCHARX 9 /* Characters. %c */ +/* The rest are extensions, not normally found in printf() */ +#define etCHARLIT 10 /* Literal characters. %' */ +#define etSQLESCAPE 11 /* Strings with '\'' doubled. %q */ +#define etSQLESCAPE2 12 /* Strings with '\'' doubled and enclosed in '', + NULL pointers replaced by SQL NULL. %Q */ +#define etTOKEN 13 /* a pointer to a Token structure */ +#define etSRCLIST 14 /* a pointer to a SrcList */ +#define etPOINTER 15 /* The %p conversion */ +#define etSQLESCAPE3 16 /* %w -> Strings with '\"' doubled */ + + +/* +** An "etByte" is an 8-bit unsigned value. +*/ +typedef unsigned char etByte; + +/* +** Each builtin conversion character (ex: the 'd' in "%d") is described +** by an instance of the following structure +*/ +typedef struct et_info { /* Information about each format field */ + char fmttype; /* The format field code letter */ + etByte base; /* The base for radix conversion */ + etByte flags; /* One or more of FLAG_ constants below */ + etByte type; /* Conversion paradigm */ + etByte charset; /* Offset into aDigits[] of the digits string */ + etByte prefix; /* Offset into aPrefix[] of the prefix string */ +} et_info; + +/* +** Allowed values for et_info.flags +*/ +#define FLAG_SIGNED 1 /* True if the value to convert is signed */ +#define FLAG_INTERN 2 /* True if for internal use only */ +#define FLAG_STRING 4 /* Allow infinity precision */ + + +/* +** The following table is searched linearly, so it is good to put the +** most frequently used conversion types first. 
+*/ +static const char aDigits[] = "0123456789ABCDEF0123456789abcdef"; +static const char aPrefix[] = "-x0\000X0"; +static const et_info fmtinfo[] = { + { 'd', 10, 1, etRADIX, 0, 0 }, + { 's', 0, 4, etSTRING, 0, 0 }, + { 'g', 0, 1, etGENERIC, 30, 0 }, + { 'z', 0, 4, etDYNSTRING, 0, 0 }, + { 'q', 0, 4, etSQLESCAPE, 0, 0 }, + { 'Q', 0, 4, etSQLESCAPE2, 0, 0 }, + { 'w', 0, 4, etSQLESCAPE3, 0, 0 }, + { 'c', 0, 0, etCHARX, 0, 0 }, + { 'o', 8, 0, etRADIX, 0, 2 }, + { 'u', 10, 0, etRADIX, 0, 0 }, + { 'x', 16, 0, etRADIX, 16, 1 }, + { 'X', 16, 0, etRADIX, 0, 4 }, +#ifndef SQLITE_OMIT_FLOATING_POINT + { 'f', 0, 1, etFLOAT, 0, 0 }, + { 'e', 0, 1, etEXP, 30, 0 }, + { 'E', 0, 1, etEXP, 14, 0 }, + { 'G', 0, 1, etGENERIC, 14, 0 }, +#endif + { 'i', 10, 1, etRADIX, 0, 0 }, + { 'n', 0, 0, etSIZE, 0, 0 }, + { '%', 0, 0, etPERCENT, 0, 0 }, + { 'p', 16, 0, etPOINTER, 0, 1 }, + { 'T', 0, 2, etTOKEN, 0, 0 }, + { 'S', 0, 2, etSRCLIST, 0, 0 }, +}; +#define etNINFO (sizeof(fmtinfo)/sizeof(fmtinfo[0])) + +/* +** If SQLITE_OMIT_FLOATING_POINT is defined, then none of the floating point +** conversions will work. +*/ +#ifndef SQLITE_OMIT_FLOATING_POINT +/* +** "*val" is a double such that 0.1 <= *val < 10.0 +** Return the ascii code for the leading digit of *val, then +** multiply "*val" by 10.0 to renormalize. +** +** Example: +** input: *val = 3.14159 +** output: *val = 1.4159 function return = '3' +** +** The counter *cnt is incremented each time. After counter exceeds +** 16 (the number of significant digits in a 64-bit float) '0' is +** always returned. +*/ +static int et_getdigit(LONGDOUBLE_TYPE *val, int *cnt){ + int digit; + LONGDOUBLE_TYPE d; + if( (*cnt)++ >= 16 ) return '0'; + digit = (int)*val; + d = digit; + digit += '0'; + *val = (*val - d)*10.0; + return digit; +} +#endif /* SQLITE_OMIT_FLOATING_POINT */ + +/* +** On machines with a small stack size, you can redefine the +** SQLITE_PRINT_BUF_SIZE to be less than 350. But beware - for +** smaller values some %f conversions may go into an infinite loop. +*/ +#ifndef SQLITE_PRINT_BUF_SIZE +# define SQLITE_PRINT_BUF_SIZE 350 +#endif +#define etBUFSIZE SQLITE_PRINT_BUF_SIZE /* Size of the output buffer */ + +/* +** The root program. All variations call this core. +** +** INPUTS: +** func This is a pointer to a function taking three arguments +** 1. A pointer to anything. Same as the "arg" parameter. +** 2. A pointer to the list of characters to be output +** (Note, this list is NOT null terminated.) +** 3. An integer number of characters to be output. +** (Note: This number might be zero.) +** +** arg This is the pointer to anything which will be passed as the +** first argument to "func". Use it for whatever you like. +** +** fmt This is the format string, as in the usual print. +** +** ap This is a pointer to a list of arguments. Same as in +** vfprint. +** +** OUTPUTS: +** The return value is the total number of characters sent to +** the function "func". Returns -1 on a error. +** +** Note that the order in which automatic variables are declared below +** seems to make a big difference in determining how fast this beast +** will run. 
+*/ +static int vxprintf( + void (*func)(void*,const char*,int), /* Consumer of text */ + void *arg, /* First argument to the consumer */ + int useExtended, /* Allow extended %-conversions */ + const char *fmt, /* Format string */ + va_list ap /* arguments */ +){ + int c; /* Next character in the format string */ + char *bufpt; /* Pointer to the conversion buffer */ + int precision; /* Precision of the current field */ + int length; /* Length of the field */ + int idx; /* A general purpose loop counter */ + int count; /* Total number of characters output */ + int width; /* Width of the current field */ + etByte flag_leftjustify; /* True if "-" flag is present */ + etByte flag_plussign; /* True if "+" flag is present */ + etByte flag_blanksign; /* True if " " flag is present */ + etByte flag_alternateform; /* True if "#" flag is present */ + etByte flag_altform2; /* True if "!" flag is present */ + etByte flag_zeropad; /* True if field width constant starts with zero */ + etByte flag_long; /* True if "l" flag is present */ + etByte flag_longlong; /* True if the "ll" flag is present */ + etByte done; /* Loop termination flag */ + sqlite_uint64 longvalue; /* Value for integer types */ + LONGDOUBLE_TYPE realvalue; /* Value for real types */ + const et_info *infop; /* Pointer to the appropriate info structure */ + char buf[etBUFSIZE]; /* Conversion buffer */ + char prefix; /* Prefix character. "+" or "-" or " " or '\0'. */ + etByte errorflag = 0; /* True if an error is encountered */ + etByte xtype; /* Conversion paradigm */ + char *zExtra; /* Extra memory used for etTCLESCAPE conversions */ + static const char spaces[] = + " "; +#define etSPACESIZE (sizeof(spaces)-1) +#ifndef SQLITE_OMIT_FLOATING_POINT + int exp, e2; /* exponent of real numbers */ + double rounder; /* Used for rounding floating point values */ + etByte flag_dp; /* True if decimal point should be shown */ + etByte flag_rtz; /* True if trailing zeros should be removed */ + etByte flag_exp; /* True to force display of the exponent */ + int nsd; /* Number of significant digits returned */ +#endif + + func(arg,"",0); + count = length = 0; + bufpt = 0; + for(; (c=(*fmt))!=0; ++fmt){ + if( c!='%' ){ + int amt; + bufpt = (char *)fmt; + amt = 1; + while( (c=(*++fmt))!='%' && c!=0 ) amt++; + (*func)(arg,bufpt,amt); + count += amt; + if( c==0 ) break; + } + if( (c=(*++fmt))==0 ){ + errorflag = 1; + (*func)(arg,"%",1); + count++; + break; + } + /* Find out what flags are present */ + flag_leftjustify = flag_plussign = flag_blanksign = + flag_alternateform = flag_altform2 = flag_zeropad = 0; + done = 0; + do{ + switch( c ){ + case '-': flag_leftjustify = 1; break; + case '+': flag_plussign = 1; break; + case ' ': flag_blanksign = 1; break; + case '#': flag_alternateform = 1; break; + case '!': flag_altform2 = 1; break; + case '0': flag_zeropad = 1; break; + default: done = 1; break; + } + }while( !done && (c=(*++fmt))!=0 ); + /* Get the field width */ + width = 0; + if( c=='*' ){ + width = va_arg(ap,int); + if( width<0 ){ + flag_leftjustify = 1; + width = -width; + } + c = *++fmt; + }else{ + while( c>='0' && c<='9' ){ + width = width*10 + c - '0'; + c = *++fmt; + } + } + if( width > etBUFSIZE-10 ){ + width = etBUFSIZE-10; + } + /* Get the precision */ + if( c=='.' 
){ + precision = 0; + c = *++fmt; + if( c=='*' ){ + precision = va_arg(ap,int); + if( precision<0 ) precision = -precision; + c = *++fmt; + }else{ + while( c>='0' && c<='9' ){ + precision = precision*10 + c - '0'; + c = *++fmt; + } + } + }else{ + precision = -1; + } + /* Get the conversion type modifier */ + if( c=='l' ){ + flag_long = 1; + c = *++fmt; + if( c=='l' ){ + flag_longlong = 1; + c = *++fmt; + }else{ + flag_longlong = 0; + } + }else{ + flag_long = flag_longlong = 0; + } + /* Fetch the info entry for the field */ + infop = 0; + for(idx=0; idxflags & FLAG_INTERN)==0 ){ + xtype = infop->type; + }else{ + return -1; + } + break; + } + } + zExtra = 0; + if( infop==0 ){ + return -1; + } + + + /* Limit the precision to prevent overflowing buf[] during conversion */ + if( precision>etBUFSIZE-40 && (infop->flags & FLAG_STRING)==0 ){ + precision = etBUFSIZE-40; + } + + /* + ** At this point, variables are initialized as follows: + ** + ** flag_alternateform TRUE if a '#' is present. + ** flag_altform2 TRUE if a '!' is present. + ** flag_plussign TRUE if a '+' is present. + ** flag_leftjustify TRUE if a '-' is present or if the + ** field width was negative. + ** flag_zeropad TRUE if the width began with 0. + ** flag_long TRUE if the letter 'l' (ell) prefixed + ** the conversion character. + ** flag_longlong TRUE if the letter 'll' (ell ell) prefixed + ** the conversion character. + ** flag_blanksign TRUE if a ' ' is present. + ** width The specified field width. This is + ** always non-negative. Zero is the default. + ** precision The specified precision. The default + ** is -1. + ** xtype The class of the conversion. + ** infop Pointer to the appropriate info struct. + */ + switch( xtype ){ + case etPOINTER: + flag_longlong = sizeof(char*)==sizeof(i64); + flag_long = sizeof(char*)==sizeof(long int); + /* Fall through into the next case */ + case etRADIX: + if( infop->flags & FLAG_SIGNED ){ + i64 v; + if( flag_longlong ) v = va_arg(ap,i64); + else if( flag_long ) v = va_arg(ap,long int); + else v = va_arg(ap,int); + if( v<0 ){ + longvalue = -v; + prefix = '-'; + }else{ + longvalue = v; + if( flag_plussign ) prefix = '+'; + else if( flag_blanksign ) prefix = ' '; + else prefix = 0; + } + }else{ + if( flag_longlong ) longvalue = va_arg(ap,u64); + else if( flag_long ) longvalue = va_arg(ap,unsigned long int); + else longvalue = va_arg(ap,unsigned int); + prefix = 0; + } + if( longvalue==0 ) flag_alternateform = 0; + if( flag_zeropad && precisioncharset]; + base = infop->base; + do{ /* Convert to ascii */ + *(--bufpt) = cset[longvalue%base]; + longvalue = longvalue/base; + }while( longvalue>0 ); + } + length = &buf[etBUFSIZE-1]-bufpt; + for(idx=precision-length; idx>0; idx--){ + *(--bufpt) = '0'; /* Zero pad */ + } + if( prefix ) *(--bufpt) = prefix; /* Add sign */ + if( flag_alternateform && infop->prefix ){ /* Add "0" or "0x" */ + const char *pre; + char x; + pre = &aPrefix[infop->prefix]; + if( *bufpt!=pre[0] ){ + for(; (x=(*pre))!=0; pre++) *(--bufpt) = x; + } + } + length = &buf[etBUFSIZE-1]-bufpt; + break; + case etFLOAT: + case etEXP: + case etGENERIC: + realvalue = va_arg(ap,double); +#ifndef SQLITE_OMIT_FLOATING_POINT + if( precision<0 ) precision = 6; /* Set default precision */ + if( precision>etBUFSIZE/2-10 ) precision = etBUFSIZE/2-10; + if( realvalue<0.0 ){ + realvalue = -realvalue; + prefix = '-'; + }else{ + if( flag_plussign ) prefix = '+'; + else if( flag_blanksign ) prefix = ' '; + else prefix = 0; + } + if( xtype==etGENERIC && precision>0 ) precision--; +#if 0 + /* Rounding 
works like BSD when the constant 0.4999 is used. Wierd! */ + for(idx=precision, rounder=0.4999; idx>0; idx--, rounder*=0.1); +#else + /* It makes more sense to use 0.5 */ + for(idx=precision, rounder=0.5; idx>0; idx--, rounder*=0.1){} +#endif + if( xtype==etFLOAT ) realvalue += rounder; + /* Normalize realvalue to within 10.0 > realvalue >= 1.0 */ + exp = 0; + if( sqlite3_isnan(realvalue) ){ + bufpt = "NaN"; + length = 3; + break; + } + if( realvalue>0.0 ){ + while( realvalue>=1e32 && exp<=350 ){ realvalue *= 1e-32; exp+=32; } + while( realvalue>=1e8 && exp<=350 ){ realvalue *= 1e-8; exp+=8; } + while( realvalue>=10.0 && exp<=350 ){ realvalue *= 0.1; exp++; } + while( realvalue<1e-8 && exp>=-350 ){ realvalue *= 1e8; exp-=8; } + while( realvalue<1.0 && exp>=-350 ){ realvalue *= 10.0; exp--; } + if( exp>350 || exp<-350 ){ + if( prefix=='-' ){ + bufpt = "-Inf"; + }else if( prefix=='+' ){ + bufpt = "+Inf"; + }else{ + bufpt = "Inf"; + } + length = strlen(bufpt); + break; + } + } + bufpt = buf; + /* + ** If the field type is etGENERIC, then convert to either etEXP + ** or etFLOAT, as appropriate. + */ + flag_exp = xtype==etEXP; + if( xtype!=etFLOAT ){ + realvalue += rounder; + if( realvalue>=10.0 ){ realvalue *= 0.1; exp++; } + } + if( xtype==etGENERIC ){ + flag_rtz = !flag_alternateform; + if( exp<-4 || exp>precision ){ + xtype = etEXP; + }else{ + precision = precision - exp; + xtype = etFLOAT; + } + }else{ + flag_rtz = 0; + } + if( xtype==etEXP ){ + e2 = 0; + }else{ + e2 = exp; + } + nsd = 0; + flag_dp = (precision>0) | flag_alternateform | flag_altform2; + /* The sign in front of the number */ + if( prefix ){ + *(bufpt++) = prefix; + } + /* Digits prior to the decimal point */ + if( e2<0 ){ + *(bufpt++) = '0'; + }else{ + for(; e2>=0; e2--){ + *(bufpt++) = et_getdigit(&realvalue,&nsd); + } + } + /* The decimal point */ + if( flag_dp ){ + *(bufpt++) = '.'; + } + /* "0" digits after the decimal point but before the first + ** significant digit of the number */ + for(e2++; e2<0 && precision>0; precision--, e2++){ + *(bufpt++) = '0'; + } + /* Significant digits after the decimal point */ + while( (precision--)>0 ){ + *(bufpt++) = et_getdigit(&realvalue,&nsd); + } + /* Remove trailing zeros and the "." if no digits follow the "." */ + if( flag_rtz && flag_dp ){ + while( bufpt[-1]=='0' ) *(--bufpt) = 0; + assert( bufpt>buf ); + if( bufpt[-1]=='.' ){ + if( flag_altform2 ){ + *(bufpt++) = '0'; + }else{ + *(--bufpt) = 0; + } + } + } + /* Add the "eNNN" suffix */ + if( flag_exp || (xtype==etEXP && exp) ){ + *(bufpt++) = aDigits[infop->charset]; + if( exp<0 ){ + *(bufpt++) = '-'; exp = -exp; + }else{ + *(bufpt++) = '+'; + } + if( exp>=100 ){ + *(bufpt++) = (exp/100)+'0'; /* 100's digit */ + exp %= 100; + } + *(bufpt++) = exp/10+'0'; /* 10's digit */ + *(bufpt++) = exp%10+'0'; /* 1's digit */ + } + *bufpt = 0; + + /* The converted number is in buf[] and zero terminated. Output it. + ** Note that the number is in the usual order, not reversed as with + ** integer conversions. 
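An illustrative aside, not part of the vendored source: because the %f/%e/%g path above generates its digits itself, the output does not depend on the C locale, while width, precision, zero-padding and left-justification behave as in standard printf(). A small sketch; the commented strings are the outputs this code should produce:

#include <stdio.h>
#include "sqlite3.h"

static void float_format_demo(void){
  char *z1 = sqlite3_mprintf("[%10.3f]", 3.14159);   /* "[     3.142]" */
  char *z2 = sqlite3_mprintf("[%-10.3f]", 3.14159);  /* "[3.142     ]" */
  char *z3 = sqlite3_mprintf("[%010.3f]", 3.14159);  /* "[000003.142]" */
  printf("%s\n%s\n%s\n", z1, z2, z3);
  sqlite3_free(z1);
  sqlite3_free(z2);
  sqlite3_free(z3);
}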
*/ + length = bufpt-buf; + bufpt = buf; + + /* Special case: Add leading zeros if the flag_zeropad flag is + ** set and we are not left justified */ + if( flag_zeropad && !flag_leftjustify && length < width){ + int i; + int nPad = width - length; + for(i=width; i>=nPad; i--){ + bufpt[i] = bufpt[i-nPad]; + } + i = prefix!=0; + while( nPad-- ) bufpt[i++] = '0'; + length = width; + } +#endif + break; + case etSIZE: + *(va_arg(ap,int*)) = count; + length = width = 0; + break; + case etPERCENT: + buf[0] = '%'; + bufpt = buf; + length = 1; + break; + case etCHARLIT: + case etCHARX: + c = buf[0] = (xtype==etCHARX ? va_arg(ap,int) : *++fmt); + if( precision>=0 ){ + for(idx=1; idx=0 && precisionetBUFSIZE ){ + bufpt = zExtra = sqlite3_malloc( n ); + if( bufpt==0 ) return -1; + }else{ + bufpt = buf; + } + j = 0; + if( needQuote ) bufpt[j++] = q; + for(i=0; (ch=escarg[i])!=0; i++){ + bufpt[j++] = ch; + if( ch==q ) bufpt[j++] = ch; + } + if( needQuote ) bufpt[j++] = q; + bufpt[j] = 0; + length = j; + /* The precision is ignored on %q and %Q */ + /* if( precision>=0 && precisionz ){ + (*func)(arg, (char*)pToken->z, pToken->n); + } + length = width = 0; + break; + } + case etSRCLIST: { + SrcList *pSrc = va_arg(ap, SrcList*); + int k = va_arg(ap, int); + struct SrcList_item *pItem = &pSrc->a[k]; + assert( k>=0 && knSrc ); + if( pItem->zDatabase && pItem->zDatabase[0] ){ + (*func)(arg, pItem->zDatabase, strlen(pItem->zDatabase)); + (*func)(arg, ".", 1); + } + (*func)(arg, pItem->zName, strlen(pItem->zName)); + length = width = 0; + break; + } + }/* End switch over the format type */ + /* + ** The text of the conversion is pointed to by "bufpt" and is + ** "length" characters long. The field width is "width". Do + ** the output. + */ + if( !flag_leftjustify ){ + register int nspace; + nspace = width-length; + if( nspace>0 ){ + count += nspace; + while( nspace>=etSPACESIZE ){ + (*func)(arg,spaces,etSPACESIZE); + nspace -= etSPACESIZE; + } + if( nspace>0 ) (*func)(arg,spaces,nspace); + } + } + if( length>0 ){ + (*func)(arg,bufpt,length); + count += length; + } + if( flag_leftjustify ){ + register int nspace; + nspace = width-length; + if( nspace>0 ){ + count += nspace; + while( nspace>=etSPACESIZE ){ + (*func)(arg,spaces,etSPACESIZE); + nspace -= etSPACESIZE; + } + if( nspace>0 ) (*func)(arg,spaces,nspace); + } + } + if( zExtra ){ + sqlite3_free(zExtra); + } + }/* End for loop over the format string */ + return errorflag ? -1 : count; +} /* End of function */ + + +/* This structure is used to store state information about the +** write to memory that is currently in progress. +*/ +struct sgMprintf { + char *zBase; /* A base allocation */ + char *zText; /* The string collected so far */ + int nChar; /* Length of the string so far */ + int nTotal; /* Output size if unconstrained */ + int nAlloc; /* Amount of space allocated in zText */ + void *(*xRealloc)(void*,int); /* Function used to realloc memory */ + int iMallocFailed; /* True if xRealloc() has failed */ +}; + +/* +** This function implements the callback from vxprintf. +** +** This routine add nNewChar characters of text in zNewText to +** the sgMprintf structure pointed to by "arg". 
+*/ +static void mout(void *arg, const char *zNewText, int nNewChar){ + struct sgMprintf *pM = (struct sgMprintf*)arg; + if( pM->iMallocFailed ) return; + pM->nTotal += nNewChar; + if( pM->zText ){ + if( pM->nChar + nNewChar + 1 > pM->nAlloc ){ + if( pM->xRealloc==0 ){ + nNewChar = pM->nAlloc - pM->nChar - 1; + }else{ + int nAlloc = pM->nChar + nNewChar*2 + 1; + if( pM->zText==pM->zBase ){ + pM->zText = pM->xRealloc(0, nAlloc); + if( pM->zText==0 ){ + pM->nAlloc = 0; + pM->iMallocFailed = 1; + return; + }else if( pM->nChar ){ + memcpy(pM->zText, pM->zBase, pM->nChar); + } + }else{ + char *zNew; + zNew = pM->xRealloc(pM->zText, nAlloc); + if( zNew ){ + pM->zText = zNew; + }else{ + pM->iMallocFailed = 1; + pM->xRealloc(pM->zText, 0); + pM->zText = 0; + pM->nAlloc = 0; + return; + } + } + pM->nAlloc = nAlloc; + } + } + if( nNewChar>0 ){ + memcpy(&pM->zText[pM->nChar], zNewText, nNewChar); + pM->nChar += nNewChar; + } + pM->zText[pM->nChar] = 0; + } +} + +/* +** This routine is a wrapper around xprintf() that invokes mout() as +** the consumer. +*/ +static char *base_vprintf( + void *(*xRealloc)(void*, int), /* realloc() function. May be NULL */ + int useInternal, /* Use internal %-conversions if true */ + char *zInitBuf, /* Initially write here, before mallocing */ + int nInitBuf, /* Size of zInitBuf[] */ + const char *zFormat, /* format string */ + va_list ap /* arguments */ +){ + struct sgMprintf sM; + sM.zBase = sM.zText = zInitBuf; + sM.nChar = sM.nTotal = 0; + sM.nAlloc = nInitBuf; + sM.xRealloc = xRealloc; + sM.iMallocFailed = 0; + vxprintf(mout, &sM, useInternal, zFormat, ap); + assert(sM.iMallocFailed==0 || sM.zText==0); + if( xRealloc && !sM.iMallocFailed ){ + if( sM.zText==sM.zBase ){ + sM.zText = xRealloc(0, sM.nChar+1); + if( sM.zText ){ + memcpy(sM.zText, sM.zBase, sM.nChar+1); + } + }else if( sM.nAlloc>sM.nChar+10 ){ + char *zNew; + sqlite3MallocBenignFailure(1); + zNew = xRealloc(sM.zText, sM.nChar+1); + if( zNew ){ + sM.zText = zNew; + } + } + } + return sM.zText; +} + +/* +** Realloc that is a real function, not a macro. +*/ +static void *printf_realloc(void *old, int size){ + return sqlite3_realloc(old, size); +} + +/* +** Print into memory obtained from sqliteMalloc(). Use the internal +** %-conversion extensions. +*/ +char *sqlite3VMPrintf(sqlite3 *db, const char *zFormat, va_list ap){ + char *z; + char zBase[SQLITE_PRINT_BUF_SIZE]; + z = base_vprintf(printf_realloc, 1, zBase, sizeof(zBase), zFormat, ap); + if( z==0 && db!=0 ){ + db->mallocFailed = 1; + } + return z; +} + +/* +** Print into memory obtained from sqliteMalloc(). Use the internal +** %-conversion extensions. +*/ +char *sqlite3MPrintf(sqlite3 *db, const char *zFormat, ...){ + va_list ap; + char *z; + char zBase[SQLITE_PRINT_BUF_SIZE]; + va_start(ap, zFormat); + z = base_vprintf(printf_realloc, 1, zBase, sizeof(zBase), zFormat, ap); + va_end(ap); + if( z==0 && db!=0 ){ + db->mallocFailed = 1; + } + return z; +} + +/* +** Print into memory obtained from sqlite3_malloc(). Omit the internal +** %-conversion extensions. +*/ +char *sqlite3_vmprintf(const char *zFormat, va_list ap){ + char zBase[SQLITE_PRINT_BUF_SIZE]; + return base_vprintf(sqlite3_realloc, 0, zBase, sizeof(zBase), zFormat, ap); +} + +/* +** Print into memory obtained from sqlite3_malloc()(). Omit the internal +** %-conversion extensions. 
+*/ +char *sqlite3_mprintf(const char *zFormat, ...){ + va_list ap; + char *z; + va_start(ap, zFormat); + z = sqlite3_vmprintf(zFormat, ap); + va_end(ap); + return z; +} + +/* +** sqlite3_snprintf() works like snprintf() except that it ignores the +** current locale settings. This is important for SQLite because we +** are not able to use a "," as the decimal point in place of "." as +** specified by some locales. +*/ +char *sqlite3_snprintf(int n, char *zBuf, const char *zFormat, ...){ + char *z; + va_list ap; + + if( n<=0 ){ + return zBuf; + } + zBuf[0] = 0; + va_start(ap,zFormat); + z = base_vprintf(0, 0, zBuf, n, zFormat, ap); + va_end(ap); + return z; +} + +#if defined(SQLITE_TEST) || defined(SQLITE_DEBUG) || defined(SQLITE_MEMDEBUG) +/* +** A version of printf() that understands %lld. Used for debugging. +** The printf() built into some versions of windows does not understand %lld +** and segfaults if you give it a long long int. +*/ +void sqlite3DebugPrintf(const char *zFormat, ...){ + extern int getpid(void); + va_list ap; + char zBuf[500]; + va_start(ap, zFormat); + base_vprintf(0, 0, zBuf, sizeof(zBuf), zFormat, ap); + va_end(ap); + fprintf(stdout,"%s", zBuf); + fflush(stdout); +} +#endif diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/random.c b/libraries/sqlite/unix/sqlite-3.5.1/src/random.c new file mode 100644 index 0000000000..19b284f94f --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/random.c @@ -0,0 +1,103 @@ +/* +** 2001 September 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains code to implement a pseudo-random number +** generator (PRNG) for SQLite. +** +** Random numbers are used by some of the database backends in order +** to generate random integer keys for tables or random filenames. +** +** $Id: random.c,v 1.20 2007/08/21 13:51:23 drh Exp $ +*/ +#include "sqliteInt.h" + + +/* +** Get a single 8-bit random value from the RC4 PRNG. The Mutex +** must be held while executing this routine. +** +** Why not just use a library random generator like lrand48() for this? +** Because the OP_NewRowid opcode in the VDBE depends on having a very +** good source of random numbers. The lrand48() library function may +** well be good enough. But maybe not. Or maybe lrand48() has some +** subtle problems on some systems that could cause problems. It is hard +** to know. To minimize the risk of problems due to bad lrand48() +** implementations, SQLite uses this random number generator based +** on RC4, which we know works very well. +** +** (Later): Actually, OP_NewRowid does not depend on a good source of +** randomness any more. But we will leave this code in all the same. +*/ +static int randomByte(void){ + unsigned char t; + + /* All threads share a single random number generator. + ** This structure is the current state of the generator. + */ + static struct { + unsigned char isInit; /* True if initialized */ + unsigned char i, j; /* State variables */ + unsigned char s[256]; /* State variables */ + } prng; + + /* Initialize the state of the random number generator once, + ** the first time this routine is called. 
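An illustrative aside, not part of the vendored source: %q doubles embedded single quotes and %Q additionally wraps the argument in '...' (or emits NULL for a null pointer), which is what makes sqlite3_mprintf() convenient for assembling SQL. Note also that sqlite3_snprintf() takes the buffer size first and returns the buffer pointer rather than a character count. A short sketch; the table name "people" is invented:

#include <stdio.h>
#include "sqlite3.h"

static void printf_demo(const char *zName){   /* e.g. zName = "O'Brien" */
  char zBuf[64];
  char *zSql = sqlite3_mprintf(
      "INSERT INTO people(name) VALUES(%Q)", zName); /* ...VALUES('O''Brien') */
  sqlite3_snprintf(sizeof(zBuf), zBuf, "hello %s, id=%d", zName, 42);
  printf("%s\n%s\n", zSql, zBuf);
  sqlite3_free(zSql);
}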
The seed value does + ** not need to contain a lot of randomness since we are not + ** trying to do secure encryption or anything like that... + ** + ** Nothing in this file or anywhere else in SQLite does any kind of + ** encryption. The RC4 algorithm is being used as a PRNG (pseudo-random + ** number generator) not as an encryption device. + */ + if( !prng.isInit ){ + int i; + char k[256]; + prng.j = 0; + prng.i = 0; + sqlite3OsRandomness(sqlite3_vfs_find(0), 256, k); + for(i=0; i<256; i++){ + prng.s[i] = i; + } + for(i=0; i<256; i++){ + prng.j += prng.s[i] + k[i]; + t = prng.s[prng.j]; + prng.s[prng.j] = prng.s[i]; + prng.s[i] = t; + } + prng.isInit = 1; + } + + /* Generate and return single random byte + */ + prng.i++; + t = prng.s[prng.i]; + prng.j += t; + prng.s[prng.i] = prng.s[prng.j]; + prng.s[prng.j] = t; + t += prng.s[prng.i]; + return prng.s[t]; +} + +/* +** Return N random bytes. +*/ +void sqlite3Randomness(int N, void *pBuf){ + unsigned char *zBuf = pBuf; + static sqlite3_mutex *mutex = 0; + if( mutex==0 ){ + mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_PRNG); + } + sqlite3_mutex_enter(mutex); + while( N-- ){ + *(zBuf++) = randomByte(); + } + sqlite3_mutex_leave(mutex); +} diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/select.c b/libraries/sqlite/unix/sqlite-3.5.1/src/select.c new file mode 100644 index 0000000000..fbe1b066db --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/select.c @@ -0,0 +1,3539 @@ +/* +** 2001 September 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains C code routines that are called by the parser +** to handle SELECT statements in SQLite. +** +** $Id: select.c,v 1.359 2007/08/31 17:42:48 danielk1977 Exp $ +*/ +#include "sqliteInt.h" + + +/* +** Delete all the content of a Select structure but do not deallocate +** the select structure itself. +*/ +static void clearSelect(Select *p){ + sqlite3ExprListDelete(p->pEList); + sqlite3SrcListDelete(p->pSrc); + sqlite3ExprDelete(p->pWhere); + sqlite3ExprListDelete(p->pGroupBy); + sqlite3ExprDelete(p->pHaving); + sqlite3ExprListDelete(p->pOrderBy); + sqlite3SelectDelete(p->pPrior); + sqlite3ExprDelete(p->pLimit); + sqlite3ExprDelete(p->pOffset); +} + + +/* +** Allocate a new Select structure and return a pointer to that +** structure. +*/ +Select *sqlite3SelectNew( + Parse *pParse, /* Parsing context */ + ExprList *pEList, /* which columns to include in the result */ + SrcList *pSrc, /* the FROM clause -- which tables to scan */ + Expr *pWhere, /* the WHERE clause */ + ExprList *pGroupBy, /* the GROUP BY clause */ + Expr *pHaving, /* the HAVING clause */ + ExprList *pOrderBy, /* the ORDER BY clause */ + int isDistinct, /* true if the DISTINCT keyword is present */ + Expr *pLimit, /* LIMIT value. NULL means not used */ + Expr *pOffset /* OFFSET value. NULL means no offset */ +){ + Select *pNew; + Select standin; + sqlite3 *db = pParse->db; + pNew = sqlite3DbMallocZero(db, sizeof(*pNew) ); + assert( !pOffset || pLimit ); /* Can't have OFFSET without LIMIT. 
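An illustrative aside, not part of the vendored source: the byte stream produced by randomByte() is what feeds the SQL random() and randomblob() functions, so the PRNG can be exercised from plain SQL. A minimal sketch, assuming db is an open handle:

#include <stdio.h>
#include "sqlite3.h"

static int random_demo(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db,
      "SELECT random(), hex(randomblob(8))", -1, &pStmt, 0);
  if( rc==SQLITE_OK && sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("random()     = %lld\n",
           (long long)sqlite3_column_int64(pStmt, 0));
    printf("randomblob() = %s\n",
           (const char*)sqlite3_column_text(pStmt, 1));
  }
  return sqlite3_finalize(pStmt);
}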
*/ + if( pNew==0 ){ + pNew = &standin; + memset(pNew, 0, sizeof(*pNew)); + } + if( pEList==0 ){ + pEList = sqlite3ExprListAppend(pParse, 0, sqlite3Expr(db,TK_ALL,0,0,0), 0); + } + pNew->pEList = pEList; + pNew->pSrc = pSrc; + pNew->pWhere = pWhere; + pNew->pGroupBy = pGroupBy; + pNew->pHaving = pHaving; + pNew->pOrderBy = pOrderBy; + pNew->isDistinct = isDistinct; + pNew->op = TK_SELECT; + assert( pOffset==0 || pLimit!=0 ); + pNew->pLimit = pLimit; + pNew->pOffset = pOffset; + pNew->iLimit = -1; + pNew->iOffset = -1; + pNew->addrOpenEphm[0] = -1; + pNew->addrOpenEphm[1] = -1; + pNew->addrOpenEphm[2] = -1; + if( pNew==&standin) { + clearSelect(pNew); + pNew = 0; + } + return pNew; +} + +/* +** Delete the given Select structure and all of its substructures. +*/ +void sqlite3SelectDelete(Select *p){ + if( p ){ + clearSelect(p); + sqlite3_free(p); + } +} + +/* +** Given 1 to 3 identifiers preceeding the JOIN keyword, determine the +** type of join. Return an integer constant that expresses that type +** in terms of the following bit values: +** +** JT_INNER +** JT_CROSS +** JT_OUTER +** JT_NATURAL +** JT_LEFT +** JT_RIGHT +** +** A full outer join is the combination of JT_LEFT and JT_RIGHT. +** +** If an illegal or unsupported join type is seen, then still return +** a join type, but put an error in the pParse structure. +*/ +int sqlite3JoinType(Parse *pParse, Token *pA, Token *pB, Token *pC){ + int jointype = 0; + Token *apAll[3]; + Token *p; + static const struct { + const char zKeyword[8]; + u8 nChar; + u8 code; + } keywords[] = { + { "natural", 7, JT_NATURAL }, + { "left", 4, JT_LEFT|JT_OUTER }, + { "right", 5, JT_RIGHT|JT_OUTER }, + { "full", 4, JT_LEFT|JT_RIGHT|JT_OUTER }, + { "outer", 5, JT_OUTER }, + { "inner", 5, JT_INNER }, + { "cross", 5, JT_INNER|JT_CROSS }, + }; + int i, j; + apAll[0] = pA; + apAll[1] = pB; + apAll[2] = pC; + for(i=0; i<3 && apAll[i]; i++){ + p = apAll[i]; + for(j=0; jn==keywords[j].nChar + && sqlite3StrNICmp((char*)p->z, keywords[j].zKeyword, p->n)==0 ){ + jointype |= keywords[j].code; + break; + } + } + if( j>=sizeof(keywords)/sizeof(keywords[0]) ){ + jointype |= JT_ERROR; + break; + } + } + if( + (jointype & (JT_INNER|JT_OUTER))==(JT_INNER|JT_OUTER) || + (jointype & JT_ERROR)!=0 + ){ + const char *zSp1 = " "; + const char *zSp2 = " "; + if( pB==0 ){ zSp1++; } + if( pC==0 ){ zSp2++; } + sqlite3ErrorMsg(pParse, "unknown or unsupported join type: " + "%T%s%T%s%T", pA, zSp1, pB, zSp2, pC); + jointype = JT_INNER; + }else if( jointype & JT_RIGHT ){ + sqlite3ErrorMsg(pParse, + "RIGHT and FULL OUTER JOINs are not currently supported"); + jointype = JT_INNER; + } + return jointype; +} + +/* +** Return the index of a column in a table. Return -1 if the column +** is not contained in the table. +*/ +static int columnIndex(Table *pTab, const char *zCol){ + int i; + for(i=0; inCol; i++){ + if( sqlite3StrICmp(pTab->aCol[i].zName, zCol)==0 ) return i; + } + return -1; +} + +/* +** Set the value of a token to a '\000'-terminated string. +*/ +static void setToken(Token *p, const char *z){ + p->z = (u8*)z; + p->n = z ? strlen(z) : 0; + p->dyn = 0; +} + +/* +** Set the token to the double-quoted and escaped version of the string pointed +** to by z. 
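An illustrative aside, not part of the vendored source: sqlite3JoinType() above maps the join keywords to bit flags and rejects RIGHT and FULL OUTER joins, so preparing such a statement fails with the message built by sqlite3ErrorMsg(). A sketch, assuming tables t1 and t2 exist:

#include <stdio.h>
#include "sqlite3.h"

static void join_type_demo(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db,
      "SELECT * FROM t1 RIGHT OUTER JOIN t2 ON t1.a=t2.a", -1, &pStmt, 0);
  if( rc!=SQLITE_OK ){
    /* Expected message (taken from the code above):
    ** "RIGHT and FULL OUTER JOINs are not currently supported" */
    printf("prepare failed: %s\n", sqlite3_errmsg(db));
  }
  if( pStmt ) sqlite3_finalize(pStmt);
}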
For example; +** +** {a"bc} -> {"a""bc"} +*/ +static void setQuotedToken(Parse *pParse, Token *p, const char *z){ + p->z = (u8 *)sqlite3MPrintf(0, "\"%w\"", z); + p->dyn = 1; + if( p->z ){ + p->n = strlen((char *)p->z); + }else{ + pParse->db->mallocFailed = 1; + } +} + +/* +** Create an expression node for an identifier with the name of zName +*/ +Expr *sqlite3CreateIdExpr(Parse *pParse, const char *zName){ + Token dummy; + setToken(&dummy, zName); + return sqlite3PExpr(pParse, TK_ID, 0, 0, &dummy); +} + + +/* +** Add a term to the WHERE expression in *ppExpr that requires the +** zCol column to be equal in the two tables pTab1 and pTab2. +*/ +static void addWhereTerm( + Parse *pParse, /* Parsing context */ + const char *zCol, /* Name of the column */ + const Table *pTab1, /* First table */ + const char *zAlias1, /* Alias for first table. May be NULL */ + const Table *pTab2, /* Second table */ + const char *zAlias2, /* Alias for second table. May be NULL */ + int iRightJoinTable, /* VDBE cursor for the right table */ + Expr **ppExpr /* Add the equality term to this expression */ +){ + Expr *pE1a, *pE1b, *pE1c; + Expr *pE2a, *pE2b, *pE2c; + Expr *pE; + + pE1a = sqlite3CreateIdExpr(pParse, zCol); + pE2a = sqlite3CreateIdExpr(pParse, zCol); + if( zAlias1==0 ){ + zAlias1 = pTab1->zName; + } + pE1b = sqlite3CreateIdExpr(pParse, zAlias1); + if( zAlias2==0 ){ + zAlias2 = pTab2->zName; + } + pE2b = sqlite3CreateIdExpr(pParse, zAlias2); + pE1c = sqlite3PExpr(pParse, TK_DOT, pE1b, pE1a, 0); + pE2c = sqlite3PExpr(pParse, TK_DOT, pE2b, pE2a, 0); + pE = sqlite3PExpr(pParse, TK_EQ, pE1c, pE2c, 0); + if( pE ){ + ExprSetProperty(pE, EP_FromJoin); + pE->iRightJoinTable = iRightJoinTable; + } + pE = sqlite3ExprAnd(pParse->db,*ppExpr, pE); + if( pE ){ + *ppExpr = pE; + } +} + +/* +** Set the EP_FromJoin property on all terms of the given expression. +** And set the Expr.iRightJoinTable to iTable for every term in the +** expression. +** +** The EP_FromJoin property is used on terms of an expression to tell +** the LEFT OUTER JOIN processing logic that this term is part of the +** join restriction specified in the ON or USING clause and not a part +** of the more general WHERE clause. These terms are moved over to the +** WHERE clause during join processing but we need to remember that they +** originated in the ON or USING clause. +** +** The Expr.iRightJoinTable tells the WHERE clause processing that the +** expression depends on table iRightJoinTable even if that table is not +** explicitly mentioned in the expression. That information is needed +** for cases like this: +** +** SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.b AND t1.x=5 +** +** The where clause needs to defer the handling of the t1.x=5 +** term until after the t2 loop of the join. In that way, a +** NULL t2 row will be inserted whenever t1.x!=5. If we do not +** defer the handling of t1.x=5, it will be processed immediately +** after the t1 loop and rows with t1.x!=5 will never appear in +** the output, which is incorrect. +*/ +static void setJoinExpr(Expr *p, int iTable){ + while( p ){ + ExprSetProperty(p, EP_FromJoin); + p->iRightJoinTable = iTable; + setJoinExpr(p->pLeft, iTable); + p = p->pRight; + } +} + +/* +** This routine processes the join information for a SELECT statement. +** ON and USING clauses are converted into extra terms of the WHERE clause. +** NATURAL joins also create extra WHERE clause terms. +** +** The terms of a FROM clause are contained in the Select.pSrc structure. 
+** The left most table is the first entry in Select.pSrc. The right-most +** table is the last entry. The join operator is held in the entry to +** the left. Thus entry 0 contains the join operator for the join between +** entries 0 and 1. Any ON or USING clauses associated with the join are +** also attached to the left entry. +** +** This routine returns the number of errors encountered. +*/ +static int sqliteProcessJoin(Parse *pParse, Select *p){ + SrcList *pSrc; /* All tables in the FROM clause */ + int i, j; /* Loop counters */ + struct SrcList_item *pLeft; /* Left table being joined */ + struct SrcList_item *pRight; /* Right table being joined */ + + pSrc = p->pSrc; + pLeft = &pSrc->a[0]; + pRight = &pLeft[1]; + for(i=0; inSrc-1; i++, pRight++, pLeft++){ + Table *pLeftTab = pLeft->pTab; + Table *pRightTab = pRight->pTab; + + if( pLeftTab==0 || pRightTab==0 ) continue; + + /* When the NATURAL keyword is present, add WHERE clause terms for + ** every column that the two tables have in common. + */ + if( pRight->jointype & JT_NATURAL ){ + if( pRight->pOn || pRight->pUsing ){ + sqlite3ErrorMsg(pParse, "a NATURAL join may not have " + "an ON or USING clause", 0); + return 1; + } + for(j=0; jnCol; j++){ + char *zName = pLeftTab->aCol[j].zName; + if( columnIndex(pRightTab, zName)>=0 ){ + addWhereTerm(pParse, zName, pLeftTab, pLeft->zAlias, + pRightTab, pRight->zAlias, + pRight->iCursor, &p->pWhere); + + } + } + } + + /* Disallow both ON and USING clauses in the same join + */ + if( pRight->pOn && pRight->pUsing ){ + sqlite3ErrorMsg(pParse, "cannot have both ON and USING " + "clauses in the same join"); + return 1; + } + + /* Add the ON clause to the end of the WHERE clause, connected by + ** an AND operator. + */ + if( pRight->pOn ){ + setJoinExpr(pRight->pOn, pRight->iCursor); + p->pWhere = sqlite3ExprAnd(pParse->db, p->pWhere, pRight->pOn); + pRight->pOn = 0; + } + + /* Create extra terms on the WHERE clause for each column named + ** in the USING clause. Example: If the two tables to be joined are + ** A and B and the USING clause names X, Y, and Z, then add this + ** to the WHERE clause: A.X=B.X AND A.Y=B.Y AND A.Z=B.Z + ** Report an error if any column mentioned in the USING clause is + ** not contained in both tables to be joined. + */ + if( pRight->pUsing ){ + IdList *pList = pRight->pUsing; + for(j=0; jnId; j++){ + char *zName = pList->a[j].zName; + if( columnIndex(pLeftTab, zName)<0 || columnIndex(pRightTab, zName)<0 ){ + sqlite3ErrorMsg(pParse, "cannot join using column %s - column " + "not present in both tables", zName); + return 1; + } + addWhereTerm(pParse, zName, pLeftTab, pLeft->zAlias, + pRightTab, pRight->zAlias, + pRight->iCursor, &p->pWhere); + } + } + } + return 0; +} + +/* +** Insert code into "v" that will push the record on the top of the +** stack into the sorter. 
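An illustrative aside, not part of the vendored source: sqliteProcessJoin() turns USING and NATURAL clauses into ordinary WHERE terms, so the first statement below carries the same join restriction as the explicitly written equality in the second. Table and column names (a, b, x, y, z) are invented for the example:

#include "sqlite3.h"

static int using_vs_where(sqlite3 *db){
  /* a JOIN b USING(x) is compiled as if the extra WHERE term a.x=b.x
  ** had been written out by hand, as in the second statement. */
  int rc = sqlite3_exec(db,
      "SELECT a.y, b.z FROM a JOIN b USING(x)", 0, 0, 0);
  if( rc==SQLITE_OK ){
    rc = sqlite3_exec(db,
        "SELECT a.y, b.z FROM a JOIN b WHERE a.x=b.x", 0, 0, 0);
  }
  return rc;
}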
+*/ +static void pushOntoSorter( + Parse *pParse, /* Parser context */ + ExprList *pOrderBy, /* The ORDER BY clause */ + Select *pSelect /* The whole SELECT statement */ +){ + Vdbe *v = pParse->pVdbe; + sqlite3ExprCodeExprList(pParse, pOrderBy); + sqlite3VdbeAddOp(v, OP_Sequence, pOrderBy->iECursor, 0); + sqlite3VdbeAddOp(v, OP_Pull, pOrderBy->nExpr + 1, 0); + sqlite3VdbeAddOp(v, OP_MakeRecord, pOrderBy->nExpr + 2, 0); + sqlite3VdbeAddOp(v, OP_IdxInsert, pOrderBy->iECursor, 0); + if( pSelect->iLimit>=0 ){ + int addr1, addr2; + addr1 = sqlite3VdbeAddOp(v, OP_IfMemZero, pSelect->iLimit+1, 0); + sqlite3VdbeAddOp(v, OP_MemIncr, -1, pSelect->iLimit+1); + addr2 = sqlite3VdbeAddOp(v, OP_Goto, 0, 0); + sqlite3VdbeJumpHere(v, addr1); + sqlite3VdbeAddOp(v, OP_Last, pOrderBy->iECursor, 0); + sqlite3VdbeAddOp(v, OP_Delete, pOrderBy->iECursor, 0); + sqlite3VdbeJumpHere(v, addr2); + pSelect->iLimit = -1; + } +} + +/* +** Add code to implement the OFFSET +*/ +static void codeOffset( + Vdbe *v, /* Generate code into this VM */ + Select *p, /* The SELECT statement being coded */ + int iContinue, /* Jump here to skip the current record */ + int nPop /* Number of times to pop stack when jumping */ +){ + if( p->iOffset>=0 && iContinue!=0 ){ + int addr; + sqlite3VdbeAddOp(v, OP_MemIncr, -1, p->iOffset); + addr = sqlite3VdbeAddOp(v, OP_IfMemNeg, p->iOffset, 0); + if( nPop>0 ){ + sqlite3VdbeAddOp(v, OP_Pop, nPop, 0); + } + sqlite3VdbeAddOp(v, OP_Goto, 0, iContinue); + VdbeComment((v, "# skip OFFSET records")); + sqlite3VdbeJumpHere(v, addr); + } +} + +/* +** Add code that will check to make sure the top N elements of the +** stack are distinct. iTab is a sorting index that holds previously +** seen combinations of the N values. A new entry is made in iTab +** if the current N values are new. +** +** A jump to addrRepeat is made and the N+1 values are popped from the +** stack if the top N elements are not distinct. +*/ +static void codeDistinct( + Vdbe *v, /* Generate code into this VM */ + int iTab, /* A sorting index used to test for distinctness */ + int addrRepeat, /* Jump to here if not distinct */ + int N /* The top N elements of the stack must be distinct */ +){ + sqlite3VdbeAddOp(v, OP_MakeRecord, -N, 0); + sqlite3VdbeAddOp(v, OP_Distinct, iTab, sqlite3VdbeCurrentAddr(v)+3); + sqlite3VdbeAddOp(v, OP_Pop, N+1, 0); + sqlite3VdbeAddOp(v, OP_Goto, 0, addrRepeat); + VdbeComment((v, "# skip indistinct records")); + sqlite3VdbeAddOp(v, OP_IdxInsert, iTab, 0); +} + +/* +** Generate an error message when a SELECT is used within a subexpression +** (example: "a IN (SELECT * FROM table)") but it has more than 1 result +** column. We do this in a subroutine because the error occurs in multiple +** places. +*/ +static int checkForMultiColumnSelectError(Parse *pParse, int eDest, int nExpr){ + if( nExpr>1 && (eDest==SRT_Mem || eDest==SRT_Set) ){ + sqlite3ErrorMsg(pParse, "only a single result allowed for " + "a SELECT that is part of an expression"); + return 1; + }else{ + return 0; + } +} + +/* +** This routine generates the code for the inside of the inner loop +** of a SELECT. +** +** If srcTab and nColumn are both zero, then the pEList expressions +** are evaluated in order to get the data for this row. If nColumn>0 +** then data is pulled from srcTab and pEList is used only to get the +** datatypes for each column. 
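An illustrative aside, not part of the vendored source: codeOffset() above implements OFFSET by counting down a memory cell and skipping rows while the counter is still positive, and the LIMIT counter terminates the loop, so a query like the one below skips one row and returns at most two. Table t and column x are invented:

#include <stdio.h>
#include "sqlite3.h"

static void limit_offset_demo(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  if( sqlite3_prepare_v2(db,
        "SELECT x FROM t ORDER BY x LIMIT 2 OFFSET 1",
        -1, &pStmt, 0)==SQLITE_OK ){
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      /* Prints the 2nd and 3rd smallest values of x. */
      printf("%d\n", sqlite3_column_int(pStmt, 0));
    }
    sqlite3_finalize(pStmt);
  }
}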
+*/ +static int selectInnerLoop( + Parse *pParse, /* The parser context */ + Select *p, /* The complete select statement being coded */ + ExprList *pEList, /* List of values being extracted */ + int srcTab, /* Pull data from this table */ + int nColumn, /* Number of columns in the source table */ + ExprList *pOrderBy, /* If not NULL, sort results using this key */ + int distinct, /* If >=0, make sure results are distinct */ + int eDest, /* How to dispose of the results */ + int iParm, /* An argument to the disposal method */ + int iContinue, /* Jump here to continue with next row */ + int iBreak, /* Jump here to break out of the inner loop */ + char *aff /* affinity string if eDest is SRT_Union */ +){ + Vdbe *v = pParse->pVdbe; + int i; + int hasDistinct; /* True if the DISTINCT keyword is present */ + + if( v==0 ) return 0; + assert( pEList!=0 ); + + /* If there was a LIMIT clause on the SELECT statement, then do the check + ** to see if this row should be output. + */ + hasDistinct = distinct>=0 && pEList->nExpr>0; + if( pOrderBy==0 && !hasDistinct ){ + codeOffset(v, p, iContinue, 0); + } + + /* Pull the requested columns. + */ + if( nColumn>0 ){ + for(i=0; inExpr; + sqlite3ExprCodeExprList(pParse, pEList); + } + + /* If the DISTINCT keyword was present on the SELECT statement + ** and this row has been seen before, then do not make this row + ** part of the result. + */ + if( hasDistinct ){ + assert( pEList!=0 ); + assert( pEList->nExpr==nColumn ); + codeDistinct(v, distinct, iContinue, nColumn); + if( pOrderBy==0 ){ + codeOffset(v, p, iContinue, nColumn); + } + } + + if( checkForMultiColumnSelectError(pParse, eDest, pEList->nExpr) ){ + return 0; + } + + switch( eDest ){ + /* In this mode, write each query result to the key of the temporary + ** table iParm. + */ +#ifndef SQLITE_OMIT_COMPOUND_SELECT + case SRT_Union: { + sqlite3VdbeAddOp(v, OP_MakeRecord, nColumn, 0); + if( aff ){ + sqlite3VdbeChangeP3(v, -1, aff, P3_STATIC); + } + sqlite3VdbeAddOp(v, OP_IdxInsert, iParm, 0); + break; + } + + /* Construct a record from the query result, but instead of + ** saving that record, use it as a key to delete elements from + ** the temporary table iParm. + */ + case SRT_Except: { + int addr; + addr = sqlite3VdbeAddOp(v, OP_MakeRecord, nColumn, 0); + sqlite3VdbeChangeP3(v, -1, aff, P3_STATIC); + sqlite3VdbeAddOp(v, OP_NotFound, iParm, addr+3); + sqlite3VdbeAddOp(v, OP_Delete, iParm, 0); + break; + } +#endif + + /* Store the result as data using a unique key. + */ + case SRT_Table: + case SRT_EphemTab: { + sqlite3VdbeAddOp(v, OP_MakeRecord, nColumn, 0); + if( pOrderBy ){ + pushOntoSorter(pParse, pOrderBy, p); + }else{ + sqlite3VdbeAddOp(v, OP_NewRowid, iParm, 0); + sqlite3VdbeAddOp(v, OP_Pull, 1, 0); + sqlite3VdbeAddOp(v, OP_Insert, iParm, OPFLAG_APPEND); + } + break; + } + +#ifndef SQLITE_OMIT_SUBQUERY + /* If we are creating a set for an "expr IN (SELECT ...)" construct, + ** then there should be a single item on the stack. Write this + ** item into the set table with bogus data. + */ + case SRT_Set: { + int addr1 = sqlite3VdbeCurrentAddr(v); + int addr2; + + assert( nColumn==1 ); + sqlite3VdbeAddOp(v, OP_NotNull, -1, addr1+3); + sqlite3VdbeAddOp(v, OP_Pop, 1, 0); + addr2 = sqlite3VdbeAddOp(v, OP_Goto, 0, 0); + p->affinity = sqlite3CompareAffinity(pEList->a[0].pExpr,(iParm>>16)&0xff); + if( pOrderBy ){ + /* At first glance you would think we could optimize out the + ** ORDER BY in this case since the order of entries in the set + ** does not matter. 
But there might be a LIMIT clause, in which + ** case the order does matter */ + pushOntoSorter(pParse, pOrderBy, p); + }else{ + sqlite3VdbeOp3(v, OP_MakeRecord, 1, 0, &p->affinity, 1); + sqlite3VdbeAddOp(v, OP_IdxInsert, (iParm&0x0000FFFF), 0); + } + sqlite3VdbeJumpHere(v, addr2); + break; + } + + /* If any row exist in the result set, record that fact and abort. + */ + case SRT_Exists: { + sqlite3VdbeAddOp(v, OP_MemInt, 1, iParm); + sqlite3VdbeAddOp(v, OP_Pop, nColumn, 0); + /* The LIMIT clause will terminate the loop for us */ + break; + } + + /* If this is a scalar select that is part of an expression, then + ** store the results in the appropriate memory cell and break out + ** of the scan loop. + */ + case SRT_Mem: { + assert( nColumn==1 ); + if( pOrderBy ){ + pushOntoSorter(pParse, pOrderBy, p); + }else{ + sqlite3VdbeAddOp(v, OP_MemStore, iParm, 1); + /* The LIMIT clause will jump out of the loop for us */ + } + break; + } +#endif /* #ifndef SQLITE_OMIT_SUBQUERY */ + + /* Send the data to the callback function or to a subroutine. In the + ** case of a subroutine, the subroutine itself is responsible for + ** popping the data from the stack. + */ + case SRT_Subroutine: + case SRT_Callback: { + if( pOrderBy ){ + sqlite3VdbeAddOp(v, OP_MakeRecord, nColumn, 0); + pushOntoSorter(pParse, pOrderBy, p); + }else if( eDest==SRT_Subroutine ){ + sqlite3VdbeAddOp(v, OP_Gosub, 0, iParm); + }else{ + sqlite3VdbeAddOp(v, OP_Callback, nColumn, 0); + } + break; + } + +#if !defined(SQLITE_OMIT_TRIGGER) + /* Discard the results. This is used for SELECT statements inside + ** the body of a TRIGGER. The purpose of such selects is to call + ** user-defined functions that have side effects. We do not care + ** about the actual results of the select. + */ + default: { + assert( eDest==SRT_Discard ); + sqlite3VdbeAddOp(v, OP_Pop, nColumn, 0); + break; + } +#endif + } + + /* Jump to the end of the loop if the LIMIT is reached. + */ + if( p->iLimit>=0 && pOrderBy==0 ){ + sqlite3VdbeAddOp(v, OP_MemIncr, -1, p->iLimit); + sqlite3VdbeAddOp(v, OP_IfMemZero, p->iLimit, iBreak); + } + return 0; +} + +/* +** Given an expression list, generate a KeyInfo structure that records +** the collating sequence for each expression in that expression list. +** +** If the ExprList is an ORDER BY or GROUP BY clause then the resulting +** KeyInfo structure is appropriate for initializing a virtual index to +** implement that clause. If the ExprList is the result set of a SELECT +** then the KeyInfo structure is appropriate for initializing a virtual +** index to implement a DISTINCT test. +** +** Space to hold the KeyInfo structure is obtain from malloc. The calling +** function is responsible for seeing that this structure is eventually +** freed. Add the KeyInfo structure to the P3 field of an opcode using +** P3_KEYINFO_HANDOFF is the usual way of dealing with this. 
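+**
+** A rough example, using the illustrative clause
+** "ORDER BY x COLLATE NOCASE, y DESC": the returned KeyInfo would have
+** nField==2, aColl[] holding the NOCASE collating sequence and (assuming
+** y carries no collation of its own) the database default collating
+** sequence, and aSortOrder[] holding {SQLITE_SO_ASC, SQLITE_SO_DESC}.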
+*/ +static KeyInfo *keyInfoFromExprList(Parse *pParse, ExprList *pList){ + sqlite3 *db = pParse->db; + int nExpr; + KeyInfo *pInfo; + struct ExprList_item *pItem; + int i; + + nExpr = pList->nExpr; + pInfo = sqlite3DbMallocZero(db, sizeof(*pInfo) + nExpr*(sizeof(CollSeq*)+1) ); + if( pInfo ){ + pInfo->aSortOrder = (u8*)&pInfo->aColl[nExpr]; + pInfo->nField = nExpr; + pInfo->enc = ENC(db); + for(i=0, pItem=pList->a; ipExpr); + if( !pColl ){ + pColl = db->pDfltColl; + } + pInfo->aColl[i] = pColl; + pInfo->aSortOrder[i] = pItem->sortOrder; + } + } + return pInfo; +} + + +/* +** If the inner loop was generated using a non-null pOrderBy argument, +** then the results were placed in a sorter. After the loop is terminated +** we need to run the sorter and output the results. The following +** routine generates the code needed to do that. +*/ +static void generateSortTail( + Parse *pParse, /* Parsing context */ + Select *p, /* The SELECT statement */ + Vdbe *v, /* Generate code into this VDBE */ + int nColumn, /* Number of columns of data */ + int eDest, /* Write the sorted results here */ + int iParm /* Optional parameter associated with eDest */ +){ + int brk = sqlite3VdbeMakeLabel(v); + int cont = sqlite3VdbeMakeLabel(v); + int addr; + int iTab; + int pseudoTab = 0; + ExprList *pOrderBy = p->pOrderBy; + + iTab = pOrderBy->iECursor; + if( eDest==SRT_Callback || eDest==SRT_Subroutine ){ + pseudoTab = pParse->nTab++; + sqlite3VdbeAddOp(v, OP_OpenPseudo, pseudoTab, 0); + sqlite3VdbeAddOp(v, OP_SetNumColumns, pseudoTab, nColumn); + } + addr = 1 + sqlite3VdbeAddOp(v, OP_Sort, iTab, brk); + codeOffset(v, p, cont, 0); + if( eDest==SRT_Callback || eDest==SRT_Subroutine ){ + sqlite3VdbeAddOp(v, OP_Integer, 1, 0); + } + sqlite3VdbeAddOp(v, OP_Column, iTab, pOrderBy->nExpr + 1); + switch( eDest ){ + case SRT_Table: + case SRT_EphemTab: { + sqlite3VdbeAddOp(v, OP_NewRowid, iParm, 0); + sqlite3VdbeAddOp(v, OP_Pull, 1, 0); + sqlite3VdbeAddOp(v, OP_Insert, iParm, OPFLAG_APPEND); + break; + } +#ifndef SQLITE_OMIT_SUBQUERY + case SRT_Set: { + assert( nColumn==1 ); + sqlite3VdbeAddOp(v, OP_NotNull, -1, sqlite3VdbeCurrentAddr(v)+3); + sqlite3VdbeAddOp(v, OP_Pop, 1, 0); + sqlite3VdbeAddOp(v, OP_Goto, 0, sqlite3VdbeCurrentAddr(v)+3); + sqlite3VdbeOp3(v, OP_MakeRecord, 1, 0, &p->affinity, 1); + sqlite3VdbeAddOp(v, OP_IdxInsert, (iParm&0x0000FFFF), 0); + break; + } + case SRT_Mem: { + assert( nColumn==1 ); + sqlite3VdbeAddOp(v, OP_MemStore, iParm, 1); + /* The LIMIT clause will terminate the loop for us */ + break; + } +#endif + case SRT_Callback: + case SRT_Subroutine: { + int i; + sqlite3VdbeAddOp(v, OP_Insert, pseudoTab, 0); + for(i=0; iiLimit>=0 ){ + sqlite3VdbeAddOp(v, OP_MemIncr, -1, p->iLimit); + sqlite3VdbeAddOp(v, OP_IfMemZero, p->iLimit, brk); + } + + /* The bottom of the loop + */ + sqlite3VdbeResolveLabel(v, cont); + sqlite3VdbeAddOp(v, OP_Next, iTab, addr); + sqlite3VdbeResolveLabel(v, brk); + if( eDest==SRT_Callback || eDest==SRT_Subroutine ){ + sqlite3VdbeAddOp(v, OP_Close, pseudoTab, 0); + } + +} + +/* +** Return a pointer to a string containing the 'declaration type' of the +** expression pExpr. The string may be treated as static by the caller. +** +** The declaration type is the exact datatype definition extracted from the +** original CREATE TABLE statement if the expression is a column. The +** declaration type for a ROWID field is INTEGER. Exactly when an expression +** is considered a column can be complex in the presence of subqueries. 
The
+** result-set expression in all of the following SELECT statements is
+** considered a column by this function.
+**
+**   SELECT col FROM tbl;
+**   SELECT (SELECT col FROM tbl;
+**   SELECT (SELECT col FROM tbl);
+**   SELECT abc FROM (SELECT col AS abc FROM tbl);
+**
+** The declaration type for any expression other than a column is NULL.
+*/
+static const char *columnType(
+  NameContext *pNC,
+  Expr *pExpr,
+  const char **pzOriginDb,
+  const char **pzOriginTab,
+  const char **pzOriginCol
+){
+  char const *zType = 0;
+  char const *zOriginDb = 0;
+  char const *zOriginTab = 0;
+  char const *zOriginCol = 0;
+  int j;
+  if( pExpr==0 || pNC->pSrcList==0 ) return 0;
+
+  switch( pExpr->op ){
+    case TK_AGG_COLUMN:
+    case TK_COLUMN: {
+      /* The expression is a column. Locate the table the column is being
+      ** extracted from in NameContext.pSrcList. This table may be a real
+      ** database table or a subquery.
+      */
+      Table *pTab = 0;            /* Table structure column is extracted from */
+      Select *pS = 0;             /* Select the column is extracted from */
+      int iCol = pExpr->iColumn;  /* Index of column in pTab */
+      while( pNC && !pTab ){
+        SrcList *pTabList = pNC->pSrcList;
+        for(j=0;j<pTabList->nSrc && pTabList->a[j].iCursor!=pExpr->iTable;j++);
+        if( j<pTabList->nSrc ){
+          pTab = pTabList->a[j].pTab;
+          pS = pTabList->a[j].pSelect;
+        }else{
+          pNC = pNC->pNext;
+        }
+      }
+
+      if( pTab==0 ){
+        /* FIX ME:
+        ** This can occur if you have something like "SELECT new.x;" inside
+        ** a trigger.  In other words, if you reference the special "new"
+        ** table in the result set of a select.  We do not have a good way
+        ** to find the actual table type, so call it "TEXT".  This is really
+        ** something of a bug, but I do not know how to fix it.
+        **
+        ** This code does not produce the correct answer - it just prevents
+        ** a segfault.  See ticket #1229.
+        */
+        zType = "TEXT";
+        break;
+      }
+
+      assert( pTab );
+      if( pS ){
+        /* The "table" is actually a sub-select or a view in the FROM clause
+        ** of the SELECT statement. Return the declaration type and origin
+        ** data for the result-set column of the sub-select.
+        */
+        if( iCol>=0 && iCol<pS->pEList->nExpr ){
+          /* If iCol is less than zero, then the expression requests the
+          ** rowid of the sub-select or view. This expression is legal (see
+          ** test case misc2.2.2) - it always evaluates to NULL.
+          */
+          NameContext sNC;
+          Expr *p = pS->pEList->a[iCol].pExpr;
+          sNC.pSrcList = pS->pSrc;
+          sNC.pNext = 0;
+          sNC.pParse = pNC->pParse;
+          zType = columnType(&sNC, p, &zOriginDb, &zOriginTab, &zOriginCol);
+        }
+      }else if( pTab->pSchema ){
+        /* A real table */
+        assert( !pS );
+        if( iCol<0 ) iCol = pTab->iPKey;
+        assert( iCol==-1 || (iCol>=0 && iCol<pTab->nCol) );
+        if( iCol<0 ){
+          zType = "INTEGER";
+          zOriginCol = "rowid";
+        }else{
+          zType = pTab->aCol[iCol].zType;
+          zOriginCol = pTab->aCol[iCol].zName;
+        }
+        zOriginTab = pTab->zName;
+        if( pNC->pParse ){
+          int iDb = sqlite3SchemaToIndex(pNC->pParse->db, pTab->pSchema);
+          zOriginDb = pNC->pParse->db->aDb[iDb].zName;
+        }
+      }
+      break;
+    }
+#ifndef SQLITE_OMIT_SUBQUERY
+    case TK_SELECT: {
+      /* The expression is a sub-select. Return the declaration type and
+      ** origin info for the single column in the result set of the SELECT
+      ** statement.
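+      **
+      ** For instance, with an illustrative expression "(SELECT col FROM tbl)"
+      ** this case returns the declaration type of tbl.col, by recursing with
+      ** a NameContext whose pSrcList is the subquery's own FROM clause.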
+      */
+      NameContext sNC;
+      Select *pS = pExpr->pSelect;
+      Expr *p = pS->pEList->a[0].pExpr;
+      sNC.pSrcList = pS->pSrc;
+      sNC.pNext = pNC;
+      sNC.pParse = pNC->pParse;
+      zType = columnType(&sNC, p, &zOriginDb, &zOriginTab, &zOriginCol);
+      break;
+    }
+#endif
+  }
+
+  if( pzOriginDb ){
+    assert( pzOriginTab && pzOriginCol );
+    *pzOriginDb = zOriginDb;
+    *pzOriginTab = zOriginTab;
+    *pzOriginCol = zOriginCol;
+  }
+  return zType;
+}
+
+/*
+** Generate code that will tell the VDBE the declaration types of columns
+** in the result set.
+*/
+static void generateColumnTypes(
+  Parse *pParse,      /* Parser context */
+  SrcList *pTabList,  /* List of tables */
+  ExprList *pEList    /* Expressions defining the result set */
+){
+  Vdbe *v = pParse->pVdbe;
+  int i;
+  NameContext sNC;
+  sNC.pSrcList = pTabList;
+  sNC.pParse = pParse;
+  for(i=0; i<pEList->nExpr; i++){
+    Expr *p = pEList->a[i].pExpr;
+    const char *zOrigDb = 0;
+    const char *zOrigTab = 0;
+    const char *zOrigCol = 0;
+    const char *zType = columnType(&sNC, p, &zOrigDb, &zOrigTab, &zOrigCol);
+
+    /* The vdbe must make its own copy of the column-type and other
+    ** column specific strings, in case the schema is reset before this
+    ** virtual machine is deleted.
+    */
+    sqlite3VdbeSetColName(v, i, COLNAME_DECLTYPE, zType, P3_TRANSIENT);
+    sqlite3VdbeSetColName(v, i, COLNAME_DATABASE, zOrigDb, P3_TRANSIENT);
+    sqlite3VdbeSetColName(v, i, COLNAME_TABLE, zOrigTab, P3_TRANSIENT);
+    sqlite3VdbeSetColName(v, i, COLNAME_COLUMN, zOrigCol, P3_TRANSIENT);
+  }
+}
+
+/*
+** Generate code that will tell the VDBE the names of columns
+** in the result set.  This information is used to provide the
+** azCol[] values in the callback.
+*/
+static void generateColumnNames(
+  Parse *pParse,      /* Parser context */
+  SrcList *pTabList,  /* List of tables */
+  ExprList *pEList    /* Expressions defining the result set */
+){
+  Vdbe *v = pParse->pVdbe;
+  int i, j;
+  sqlite3 *db = pParse->db;
+  int fullNames, shortNames;
+
+#ifndef SQLITE_OMIT_EXPLAIN
+  /* If this is an EXPLAIN, skip this step */
+  if( pParse->explain ){
+    return;
+  }
+#endif
+
+  assert( v!=0 );
+  if( pParse->colNamesSet || v==0 || db->mallocFailed ) return;
+  pParse->colNamesSet = 1;
+  fullNames = (db->flags & SQLITE_FullColNames)!=0;
+  shortNames = (db->flags & SQLITE_ShortColNames)!=0;
+  sqlite3VdbeSetNumCols(v, pEList->nExpr);
+  for(i=0; i<pEList->nExpr; i++){
+    Expr *p;
+    p = pEList->a[i].pExpr;
+    if( p==0 ) continue;
+    if( pEList->a[i].zName ){
+      char *zName = pEList->a[i].zName;
+      sqlite3VdbeSetColName(v, i, COLNAME_NAME, zName, strlen(zName));
+      continue;
+    }
+    if( p->op==TK_COLUMN && pTabList ){
+      Table *pTab;
+      char *zCol;
+      int iCol = p->iColumn;
+      for(j=0; j<pTabList->nSrc && pTabList->a[j].iCursor!=p->iTable; j++){}
+      assert( j<pTabList->nSrc );
+      pTab = pTabList->a[j].pTab;
+      if( iCol<0 ) iCol = pTab->iPKey;
+      assert( iCol==-1 || (iCol>=0 && iCol<pTab->nCol) );
+      if( iCol<0 ){
+        zCol = "rowid";
+      }else{
+        zCol = pTab->aCol[iCol].zName;
+      }
+      if( !shortNames && !fullNames && p->span.z && p->span.z[0] ){
+        sqlite3VdbeSetColName(v, i, COLNAME_NAME, (char*)p->span.z, p->span.n);
+      }else if( fullNames || (!shortNames && pTabList->nSrc>1) ){
+        char *zName = 0;
+        char *zTab;
+
+        zTab = pTabList->a[j].zAlias;
+        if( fullNames || zTab==0 ) zTab = pTab->zName;
+        sqlite3SetString(&zName, zTab, ".", zCol, (char*)0);
+        sqlite3VdbeSetColName(v, i, COLNAME_NAME, zName, P3_DYNAMIC);
+      }else{
+        sqlite3VdbeSetColName(v, i, COLNAME_NAME, zCol, strlen(zCol));
+      }
+    }else if( p->span.z && p->span.z[0] ){
+      sqlite3VdbeSetColName(v, i, COLNAME_NAME, (char*)p->span.z,
p->span.n); + /* sqlite3VdbeCompressSpace(v, addr); */ + }else{ + char zName[30]; + assert( p->op!=TK_COLUMN || pTabList==0 ); + sqlite3_snprintf(sizeof(zName), zName, "column%d", i+1); + sqlite3VdbeSetColName(v, i, COLNAME_NAME, zName, 0); + } + } + generateColumnTypes(pParse, pTabList, pEList); +} + +#ifndef SQLITE_OMIT_COMPOUND_SELECT +/* +** Name of the connection operator, used for error messages. +*/ +static const char *selectOpName(int id){ + char *z; + switch( id ){ + case TK_ALL: z = "UNION ALL"; break; + case TK_INTERSECT: z = "INTERSECT"; break; + case TK_EXCEPT: z = "EXCEPT"; break; + default: z = "UNION"; break; + } + return z; +} +#endif /* SQLITE_OMIT_COMPOUND_SELECT */ + +/* +** Forward declaration +*/ +static int prepSelectStmt(Parse*, Select*); + +/* +** Given a SELECT statement, generate a Table structure that describes +** the result set of that SELECT. +*/ +Table *sqlite3ResultSetOfSelect(Parse *pParse, char *zTabName, Select *pSelect){ + Table *pTab; + int i, j; + ExprList *pEList; + Column *aCol, *pCol; + sqlite3 *db = pParse->db; + + while( pSelect->pPrior ) pSelect = pSelect->pPrior; + if( prepSelectStmt(pParse, pSelect) ){ + return 0; + } + if( sqlite3SelectResolve(pParse, pSelect, 0) ){ + return 0; + } + pTab = sqlite3DbMallocZero(db, sizeof(Table) ); + if( pTab==0 ){ + return 0; + } + pTab->nRef = 1; + pTab->zName = zTabName ? sqlite3DbStrDup(db, zTabName) : 0; + pEList = pSelect->pEList; + pTab->nCol = pEList->nExpr; + assert( pTab->nCol>0 ); + pTab->aCol = aCol = sqlite3DbMallocZero(db, sizeof(pTab->aCol[0])*pTab->nCol); + for(i=0, pCol=aCol; inCol; i++, pCol++){ + Expr *p, *pR; + char *zType; + char *zName; + int nName; + CollSeq *pColl; + int cnt; + NameContext sNC; + + /* Get an appropriate name for the column + */ + p = pEList->a[i].pExpr; + assert( p->pRight==0 || p->pRight->token.z==0 || p->pRight->token.z[0]!=0 ); + if( (zName = pEList->a[i].zName)!=0 ){ + /* If the column contains an "AS " phrase, use as the name */ + zName = sqlite3DbStrDup(db, zName); + }else if( p->op==TK_DOT + && (pR=p->pRight)!=0 && pR->token.z && pR->token.z[0] ){ + /* For columns of the from A.B use B as the name */ + zName = sqlite3MPrintf(db, "%T", &pR->token); + }else if( p->span.z && p->span.z[0] ){ + /* Use the original text of the column expression as its name */ + zName = sqlite3MPrintf(db, "%T", &p->span); + }else{ + /* If all else fails, make up a name */ + zName = sqlite3MPrintf(db, "column%d", i+1); + } + if( !zName || db->mallocFailed ){ + db->mallocFailed = 1; + sqlite3_free(zName); + sqlite3DeleteTable(pTab); + return 0; + } + sqlite3Dequote(zName); + + /* Make sure the column name is unique. If the name is not unique, + ** append a integer to the name so that it becomes unique. + */ + nName = strlen(zName); + for(j=cnt=0; jzName = zName; + + /* Get the typename, type affinity, and collating sequence for the + ** column. + */ + memset(&sNC, 0, sizeof(sNC)); + sNC.pSrcList = pSelect->pSrc; + zType = sqlite3DbStrDup(db, columnType(&sNC, p, 0, 0, 0)); + pCol->zType = zType; + pCol->affinity = sqlite3ExprAffinity(p); + pColl = sqlite3ExprCollSeq(pParse, p); + if( pColl ){ + pCol->zColl = sqlite3DbStrDup(db, pColl->zName); + } + } + pTab->iPKey = -1; + return pTab; +} + +/* +** Prepare a SELECT statement for processing by doing the following +** things: +** +** (1) Make sure VDBE cursor numbers have been assigned to every +** element of the FROM clause. +** +** (2) Fill in the pTabList->a[].pTab fields in the SrcList that +** defines FROM clause. 
When views appear in the FROM clause, +** fill pTabList->a[].pSelect with a copy of the SELECT statement +** that implements the view. A copy is made of the view's SELECT +** statement so that we can freely modify or delete that statement +** without worrying about messing up the presistent representation +** of the view. +** +** (3) Add terms to the WHERE clause to accomodate the NATURAL keyword +** on joins and the ON and USING clause of joins. +** +** (4) Scan the list of columns in the result set (pEList) looking +** for instances of the "*" operator or the TABLE.* operator. +** If found, expand each "*" to be every column in every table +** and TABLE.* to be every column in TABLE. +** +** Return 0 on success. If there are problems, leave an error message +** in pParse and return non-zero. +*/ +static int prepSelectStmt(Parse *pParse, Select *p){ + int i, j, k, rc; + SrcList *pTabList; + ExprList *pEList; + struct SrcList_item *pFrom; + sqlite3 *db = pParse->db; + + if( p==0 || p->pSrc==0 || db->mallocFailed ){ + return 1; + } + pTabList = p->pSrc; + pEList = p->pEList; + + /* Make sure cursor numbers have been assigned to all entries in + ** the FROM clause of the SELECT statement. + */ + sqlite3SrcListAssignCursors(pParse, p->pSrc); + + /* Look up every table named in the FROM clause of the select. If + ** an entry of the FROM clause is a subquery instead of a table or view, + ** then create a transient table structure to describe the subquery. + */ + for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ + Table *pTab; + if( pFrom->pTab!=0 ){ + /* This statement has already been prepared. There is no need + ** to go further. */ + assert( i==0 ); + return 0; + } + if( pFrom->zName==0 ){ +#ifndef SQLITE_OMIT_SUBQUERY + /* A sub-query in the FROM clause of a SELECT */ + assert( pFrom->pSelect!=0 ); + if( pFrom->zAlias==0 ){ + pFrom->zAlias = + sqlite3MPrintf(db, "sqlite_subquery_%p_", (void*)pFrom->pSelect); + } + assert( pFrom->pTab==0 ); + pFrom->pTab = pTab = + sqlite3ResultSetOfSelect(pParse, pFrom->zAlias, pFrom->pSelect); + if( pTab==0 ){ + return 1; + } + /* The isEphem flag indicates that the Table structure has been + ** dynamically allocated and may be freed at any time. In other words, + ** pTab is not pointing to a persistent table structure that defines + ** part of the schema. */ + pTab->isEphem = 1; +#endif + }else{ + /* An ordinary table or view name in the FROM clause */ + assert( pFrom->pTab==0 ); + pFrom->pTab = pTab = + sqlite3LocateTable(pParse,pFrom->zName,pFrom->zDatabase); + if( pTab==0 ){ + return 1; + } + pTab->nRef++; +#if !defined(SQLITE_OMIT_VIEW) || !defined (SQLITE_OMIT_VIRTUALTABLE) + if( pTab->pSelect || IsVirtual(pTab) ){ + /* We reach here if the named table is a really a view */ + if( sqlite3ViewGetColumnNames(pParse, pTab) ){ + return 1; + } + /* If pFrom->pSelect!=0 it means we are dealing with a + ** view within a view. The SELECT structure has already been + ** copied by the outer view so we can skip the copy step here + ** in the inner view. + */ + if( pFrom->pSelect==0 ){ + pFrom->pSelect = sqlite3SelectDup(db, pTab->pSelect); + } + } +#endif + } + } + + /* Process NATURAL keywords, and ON and USING clauses of joins. + */ + if( sqliteProcessJoin(pParse, p) ) return 1; + + /* For every "*" that occurs in the column list, insert the names of + ** all columns in all tables. And for every TABLE.* insert the names + ** of all columns in TABLE. 
The parser inserted a special expression
+  ** with the TK_ALL operator for each "*" that it found in the column list.
+  ** The following code just has to locate the TK_ALL expressions and expand
+  ** each one to the list of all columns in all tables.
+  **
+  ** The first loop just checks to see if there are any "*" operators
+  ** that need expanding.
+  */
+  for(k=0; k<pEList->nExpr; k++){
+    Expr *pE = pEList->a[k].pExpr;
+    if( pE->op==TK_ALL ) break;
+    if( pE->op==TK_DOT && pE->pRight && pE->pRight->op==TK_ALL
+         && pE->pLeft && pE->pLeft->op==TK_ID ) break;
+  }
+  rc = 0;
+  if( k<pEList->nExpr ){
+    /*
+    ** If we get here it means the result set contains one or more "*"
+    ** operators that need to be expanded.  Loop through each expression
+    ** in the result set and expand them one by one.
+    */
+    struct ExprList_item *a = pEList->a;
+    ExprList *pNew = 0;
+    int flags = pParse->db->flags;
+    int longNames = (flags & SQLITE_FullColNames)!=0 &&
+                      (flags & SQLITE_ShortColNames)==0;
+
+    for(k=0; k<pEList->nExpr; k++){
+      Expr *pE = a[k].pExpr;
+      if( pE->op!=TK_ALL &&
+           (pE->op!=TK_DOT || pE->pRight==0 || pE->pRight->op!=TK_ALL) ){
+        /* This particular expression does not need to be expanded.
+        */
+        pNew = sqlite3ExprListAppend(pParse, pNew, a[k].pExpr, 0);
+        if( pNew ){
+          pNew->a[pNew->nExpr-1].zName = a[k].zName;
+        }else{
+          rc = 1;
+        }
+        a[k].pExpr = 0;
+        a[k].zName = 0;
+      }else{
+        /* This expression is a "*" or a "TABLE.*" and needs to be
+        ** expanded. */
+        int tableSeen = 0;      /* Set to 1 when TABLE matches */
+        char *zTName;           /* text of name of TABLE */
+        if( pE->op==TK_DOT && pE->pLeft ){
+          zTName = sqlite3NameFromToken(db, &pE->pLeft->token);
+        }else{
+          zTName = 0;
+        }
+        for(i=0, pFrom=pTabList->a; i<pTabList->nSrc; i++, pFrom++){
+          Table *pTab = pFrom->pTab;
+          char *zTabName = pFrom->zAlias;
+          if( zTabName==0 || zTabName[0]==0 ){
+            zTabName = pTab->zName;
+          }
+          if( zTName && (zTabName==0 || zTabName[0]==0 ||
+                 sqlite3StrICmp(zTName, zTabName)!=0) ){
+            continue;
+          }
+          tableSeen = 1;
+          for(j=0; j<pTab->nCol; j++){
+            Expr *pExpr, *pRight;
+            char *zName = pTab->aCol[j].zName;
+
+            /* If a column is marked as 'hidden' (currently only possible
+            ** for virtual tables), do not include it in the expanded
+            ** result-set list.
+            */
+            if( IsHiddenColumn(&pTab->aCol[j]) ){
+              assert(IsVirtual(pTab));
+              continue;
+            }
+
+            if( i>0 ){
+              struct SrcList_item *pLeft = &pTabList->a[i-1];
+              if( (pLeft[1].jointype & JT_NATURAL)!=0 &&
+                        columnIndex(pLeft->pTab, zName)>=0 ){
+                /* In a NATURAL join, omit the join columns from the
+                ** table on the right */
+                continue;
+              }
+              if( sqlite3IdListIndex(pLeft[1].pUsing, zName)>=0 ){
+                /* In a join with a USING clause, omit columns in the
+                ** using clause from the table on the right.
*/ + continue; + } + } + pRight = sqlite3PExpr(pParse, TK_ID, 0, 0, 0); + if( pRight==0 ) break; + setQuotedToken(pParse, &pRight->token, zName); + if( zTabName && (longNames || pTabList->nSrc>1) ){ + Expr *pLeft = sqlite3PExpr(pParse, TK_ID, 0, 0, 0); + pExpr = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight, 0); + if( pExpr==0 ) break; + setQuotedToken(pParse, &pLeft->token, zTabName); + setToken(&pExpr->span, + sqlite3MPrintf(db, "%s.%s", zTabName, zName)); + pExpr->span.dyn = 1; + pExpr->token.z = 0; + pExpr->token.n = 0; + pExpr->token.dyn = 0; + }else{ + pExpr = pRight; + pExpr->span = pExpr->token; + pExpr->span.dyn = 0; + } + if( longNames ){ + pNew = sqlite3ExprListAppend(pParse, pNew, pExpr, &pExpr->span); + }else{ + pNew = sqlite3ExprListAppend(pParse, pNew, pExpr, &pRight->token); + } + } + } + if( !tableSeen ){ + if( zTName ){ + sqlite3ErrorMsg(pParse, "no such table: %s", zTName); + }else{ + sqlite3ErrorMsg(pParse, "no tables specified"); + } + rc = 1; + } + sqlite3_free(zTName); + } + } + sqlite3ExprListDelete(pEList); + p->pEList = pNew; + } + if( p->pEList && p->pEList->nExpr>SQLITE_MAX_COLUMN ){ + sqlite3ErrorMsg(pParse, "too many columns in result set"); + rc = SQLITE_ERROR; + } + if( db->mallocFailed ){ + rc = SQLITE_NOMEM; + } + return rc; +} + +#ifndef SQLITE_OMIT_COMPOUND_SELECT +/* +** This routine associates entries in an ORDER BY expression list with +** columns in a result. For each ORDER BY expression, the opcode of +** the top-level node is changed to TK_COLUMN and the iColumn value of +** the top-level node is filled in with column number and the iTable +** value of the top-level node is filled with iTable parameter. +** +** If there are prior SELECT clauses, they are processed first. A match +** in an earlier SELECT takes precedence over a later SELECT. +** +** Any entry that does not match is flagged as an error. The number +** of errors is returned. 
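+**
+** For example (illustrative statement only), in
+**
+**     SELECT a, b FROM t1 UNION SELECT x, y FROM t2 ORDER BY 2, a
+**
+** the term "2" resolves to the second result column and the name "a"
+** matches the first column of the left-most SELECT; each matching term
+** is rewritten as a TK_COLUMN reference with iTable set to the iTable
+** parameter and iColumn set to the matched column's index.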
+*/
+static int matchOrderbyToColumn(
+  Parse *pParse,          /* A place to leave error messages */
+  Select *pSelect,        /* Match to result columns of this SELECT */
+  ExprList *pOrderBy,     /* The ORDER BY values to match against columns */
+  int iTable,             /* Insert this value in iTable */
+  int mustComplete        /* If TRUE all ORDER BYs must match */
+){
+  int nErr = 0;
+  int i, j;
+  ExprList *pEList;
+  sqlite3 *db = pParse->db;
+
+  if( pSelect==0 || pOrderBy==0 ) return 1;
+  if( mustComplete ){
+    for(i=0; i<pOrderBy->nExpr; i++){ pOrderBy->a[i].done = 0; }
+  }
+  if( prepSelectStmt(pParse, pSelect) ){
+    return 1;
+  }
+  if( pSelect->pPrior ){
+    if( matchOrderbyToColumn(pParse, pSelect->pPrior, pOrderBy, iTable, 0) ){
+      return 1;
+    }
+  }
+  pEList = pSelect->pEList;
+  for(i=0; i<pOrderBy->nExpr; i++){
+    struct ExprList_item *pItem;
+    Expr *pE = pOrderBy->a[i].pExpr;
+    int iCol = -1;
+    char *zLabel;
+
+    if( pOrderBy->a[i].done ) continue;
+    if( sqlite3ExprIsInteger(pE, &iCol) ){
+      if( iCol<=0 || iCol>pEList->nExpr ){
+        sqlite3ErrorMsg(pParse,
+          "ORDER BY position %d should be between 1 and %d",
+          iCol, pEList->nExpr);
+        nErr++;
+        break;
+      }
+      if( !mustComplete ) continue;
+      iCol--;
+    }
+    if( iCol<0 && (zLabel = sqlite3NameFromToken(db, &pE->token))!=0 ){
+      for(j=0, pItem=pEList->a; j<pEList->nExpr; j++, pItem++){
+        char *zName;
+        int isMatch;
+        if( pItem->zName ){
+          zName = sqlite3DbStrDup(db, pItem->zName);
+        }else{
+          zName = sqlite3NameFromToken(db, &pItem->pExpr->token);
+        }
+        isMatch = zName && sqlite3StrICmp(zName, zLabel)==0;
+        sqlite3_free(zName);
+        if( isMatch ){
+          iCol = j;
+          break;
+        }
+      }
+      sqlite3_free(zLabel);
+    }
+    if( iCol>=0 ){
+      pE->op = TK_COLUMN;
+      pE->iColumn = iCol;
+      pE->iTable = iTable;
+      pE->iAgg = -1;
+      pOrderBy->a[i].done = 1;
+    }else if( mustComplete ){
+      sqlite3ErrorMsg(pParse,
+        "ORDER BY term number %d does not match any result column", i+1);
+      nErr++;
+      break;
+    }
+  }
+  return nErr;
+}
+#endif /* #ifndef SQLITE_OMIT_COMPOUND_SELECT */
+
+/*
+** Get a VDBE for the given parser context.  Create a new one if necessary.
+** If an error occurs, return NULL and leave a message in pParse.
+*/
+Vdbe *sqlite3GetVdbe(Parse *pParse){
+  Vdbe *v = pParse->pVdbe;
+  if( v==0 ){
+    v = pParse->pVdbe = sqlite3VdbeCreate(pParse->db);
+  }
+  return v;
+}
+
+
+/*
+** Compute the iLimit and iOffset fields of the SELECT based on the
+** pLimit and pOffset expressions.  pLimit and pOffset hold the expressions
+** that appear in the original SQL statement after the LIMIT and OFFSET
+** keywords.  Or NULL if those keywords are omitted. iLimit and iOffset
+** are the integer memory register numbers for counters used to compute
+** the limit and offset.  If there is no limit and/or offset, then
+** iLimit and iOffset are negative.
+**
+** This routine changes the values of iLimit and iOffset only if
+** a limit or offset is defined by pLimit and pOffset.  iLimit and
+** iOffset should have been preset to appropriate default values
+** (usually but not always -1) prior to calling this routine.
+** Only if pLimit!=0 or pOffset!=0 do the limit registers get
+** redefined.  The UNION ALL operator uses this property to force
+** the reuse of the same limit and offset registers across multiple
+** SELECT statements.
+*/
+static void computeLimitRegisters(Parse *pParse, Select *p, int iBreak){
+  Vdbe *v = 0;
+  int iLimit = 0;
+  int iOffset;
+  int addr1, addr2;
+
+  /*
+  ** "LIMIT -1" always shows all rows.  There is some
+  ** controversy about what the correct behavior should be.
+  ** The current implementation interprets "LIMIT 0" to mean
+  ** no rows.
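+  **
+  ** A rough sketch of the register usage: for an illustrative clause
+  ** "LIMIT 10 OFFSET 5", memory cell iLimit is initialized to 10, cell
+  ** iOffset to 5, and (when both values are positive) cell iLimit+1 is
+  ** set to 15, the combined LIMIT+OFFSET consumed by pushOntoSorter().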
+ */ + if( p->pLimit ){ + p->iLimit = iLimit = pParse->nMem; + pParse->nMem += 2; + v = sqlite3GetVdbe(pParse); + if( v==0 ) return; + sqlite3ExprCode(pParse, p->pLimit); + sqlite3VdbeAddOp(v, OP_MustBeInt, 0, 0); + sqlite3VdbeAddOp(v, OP_MemStore, iLimit, 1); + VdbeComment((v, "# LIMIT counter")); + sqlite3VdbeAddOp(v, OP_IfMemZero, iLimit, iBreak); + sqlite3VdbeAddOp(v, OP_MemLoad, iLimit, 0); + } + if( p->pOffset ){ + p->iOffset = iOffset = pParse->nMem++; + v = sqlite3GetVdbe(pParse); + if( v==0 ) return; + sqlite3ExprCode(pParse, p->pOffset); + sqlite3VdbeAddOp(v, OP_MustBeInt, 0, 0); + sqlite3VdbeAddOp(v, OP_MemStore, iOffset, p->pLimit==0); + VdbeComment((v, "# OFFSET counter")); + addr1 = sqlite3VdbeAddOp(v, OP_IfMemPos, iOffset, 0); + sqlite3VdbeAddOp(v, OP_Pop, 1, 0); + sqlite3VdbeAddOp(v, OP_Integer, 0, 0); + sqlite3VdbeJumpHere(v, addr1); + if( p->pLimit ){ + sqlite3VdbeAddOp(v, OP_Add, 0, 0); + } + } + if( p->pLimit ){ + addr1 = sqlite3VdbeAddOp(v, OP_IfMemPos, iLimit, 0); + sqlite3VdbeAddOp(v, OP_Pop, 1, 0); + sqlite3VdbeAddOp(v, OP_MemInt, -1, iLimit+1); + addr2 = sqlite3VdbeAddOp(v, OP_Goto, 0, 0); + sqlite3VdbeJumpHere(v, addr1); + sqlite3VdbeAddOp(v, OP_MemStore, iLimit+1, 1); + VdbeComment((v, "# LIMIT+OFFSET")); + sqlite3VdbeJumpHere(v, addr2); + } +} + +/* +** Allocate a virtual index to use for sorting. +*/ +static void createSortingIndex(Parse *pParse, Select *p, ExprList *pOrderBy){ + if( pOrderBy ){ + int addr; + assert( pOrderBy->iECursor==0 ); + pOrderBy->iECursor = pParse->nTab++; + addr = sqlite3VdbeAddOp(pParse->pVdbe, OP_OpenEphemeral, + pOrderBy->iECursor, pOrderBy->nExpr+1); + assert( p->addrOpenEphm[2] == -1 ); + p->addrOpenEphm[2] = addr; + } +} + +#ifndef SQLITE_OMIT_COMPOUND_SELECT +/* +** Return the appropriate collating sequence for the iCol-th column of +** the result set for the compound-select statement "p". Return NULL if +** the column has no default collating sequence. +** +** The collating sequence for the compound select is taken from the +** left-most term of the select that has a collating sequence. +*/ +static CollSeq *multiSelectCollSeq(Parse *pParse, Select *p, int iCol){ + CollSeq *pRet; + if( p->pPrior ){ + pRet = multiSelectCollSeq(pParse, p->pPrior, iCol); + }else{ + pRet = 0; + } + if( pRet==0 ){ + pRet = sqlite3ExprCollSeq(pParse, p->pEList->a[iCol].pExpr); + } + return pRet; +} +#endif /* SQLITE_OMIT_COMPOUND_SELECT */ + +#ifndef SQLITE_OMIT_COMPOUND_SELECT +/* +** This routine is called to process a query that is really the union +** or intersection of two or more separate queries. +** +** "p" points to the right-most of the two queries. the query on the +** left is p->pPrior. The left query could also be a compound query +** in which case this routine will be called recursively. +** +** The results of the total query are to be written into a destination +** of type eDest with parameter iParm. +** +** Example 1: Consider a three-way compound SQL statement. +** +** SELECT a FROM t1 UNION SELECT b FROM t2 UNION SELECT c FROM t3 +** +** This statement is parsed up as follows: +** +** SELECT c FROM t3 +** | +** `-----> SELECT b FROM t2 +** | +** `------> SELECT a FROM t1 +** +** The arrows in the diagram above represent the Select.pPrior pointer. +** So if this routine is called with p equal to the t3 query, then +** pPrior will be the t2 query. p->op will be TK_UNION in this case. +** +** Notice that because of the way SQLite parses compound SELECTs, the +** individual selects always group from left to right. 
+*/ +static int multiSelect( + Parse *pParse, /* Parsing context */ + Select *p, /* The right-most of SELECTs to be coded */ + int eDest, /* \___ Store query results as specified */ + int iParm, /* / by these two parameters. */ + char *aff /* If eDest is SRT_Union, the affinity string */ +){ + int rc = SQLITE_OK; /* Success code from a subroutine */ + Select *pPrior; /* Another SELECT immediately to our left */ + Vdbe *v; /* Generate code to this VDBE */ + int nCol; /* Number of columns in the result set */ + ExprList *pOrderBy; /* The ORDER BY clause on p */ + int aSetP2[2]; /* Set P2 value of these op to number of columns */ + int nSetP2 = 0; /* Number of slots in aSetP2[] used */ + + /* Make sure there is no ORDER BY or LIMIT clause on prior SELECTs. Only + ** the last (right-most) SELECT in the series may have an ORDER BY or LIMIT. + */ + if( p==0 || p->pPrior==0 ){ + rc = 1; + goto multi_select_end; + } + pPrior = p->pPrior; + assert( pPrior->pRightmost!=pPrior ); + assert( pPrior->pRightmost==p->pRightmost ); + if( pPrior->pOrderBy ){ + sqlite3ErrorMsg(pParse,"ORDER BY clause should come after %s not before", + selectOpName(p->op)); + rc = 1; + goto multi_select_end; + } + if( pPrior->pLimit ){ + sqlite3ErrorMsg(pParse,"LIMIT clause should come after %s not before", + selectOpName(p->op)); + rc = 1; + goto multi_select_end; + } + + /* Make sure we have a valid query engine. If not, create a new one. + */ + v = sqlite3GetVdbe(pParse); + if( v==0 ){ + rc = 1; + goto multi_select_end; + } + + /* Create the destination temporary table if necessary + */ + if( eDest==SRT_EphemTab ){ + assert( p->pEList ); + assert( nSetP2pOrderBy; + switch( p->op ){ + case TK_ALL: { + if( pOrderBy==0 ){ + int addr = 0; + assert( !pPrior->pLimit ); + pPrior->pLimit = p->pLimit; + pPrior->pOffset = p->pOffset; + rc = sqlite3Select(pParse, pPrior, eDest, iParm, 0, 0, 0, aff); + p->pLimit = 0; + p->pOffset = 0; + if( rc ){ + goto multi_select_end; + } + p->pPrior = 0; + p->iLimit = pPrior->iLimit; + p->iOffset = pPrior->iOffset; + if( p->iLimit>=0 ){ + addr = sqlite3VdbeAddOp(v, OP_IfMemZero, p->iLimit, 0); + VdbeComment((v, "# Jump ahead if LIMIT reached")); + } + rc = sqlite3Select(pParse, p, eDest, iParm, 0, 0, 0, aff); + p->pPrior = pPrior; + if( rc ){ + goto multi_select_end; + } + if( addr ){ + sqlite3VdbeJumpHere(v, addr); + } + break; + } + /* For UNION ALL ... ORDER BY fall through to the next case */ + } + case TK_EXCEPT: + case TK_UNION: { + int unionTab; /* Cursor number of the temporary table holding result */ + int op = 0; /* One of the SRT_ operations to apply to self */ + int priorOp; /* The SRT_ operation to apply to prior selects */ + Expr *pLimit, *pOffset; /* Saved values of p->nLimit and p->nOffset */ + int addr; + + priorOp = p->op==TK_ALL ? SRT_Table : SRT_Union; + if( eDest==priorOp && pOrderBy==0 && !p->pLimit && !p->pOffset ){ + /* We can reuse a temporary table generated by a SELECT to our + ** right. + */ + unionTab = iParm; + }else{ + /* We will need to create our own temporary table to hold the + ** intermediate results. 
+ */ + unionTab = pParse->nTab++; + if( pOrderBy && matchOrderbyToColumn(pParse, p, pOrderBy, unionTab,1) ){ + rc = 1; + goto multi_select_end; + } + addr = sqlite3VdbeAddOp(v, OP_OpenEphemeral, unionTab, 0); + if( priorOp==SRT_Table ){ + assert( nSetP2addrOpenEphm[0] == -1 ); + p->addrOpenEphm[0] = addr; + p->pRightmost->usesEphm = 1; + } + createSortingIndex(pParse, p, pOrderBy); + assert( p->pEList ); + } + + /* Code the SELECT statements to our left + */ + assert( !pPrior->pOrderBy ); + rc = sqlite3Select(pParse, pPrior, priorOp, unionTab, 0, 0, 0, aff); + if( rc ){ + goto multi_select_end; + } + + /* Code the current SELECT statement + */ + switch( p->op ){ + case TK_EXCEPT: op = SRT_Except; break; + case TK_UNION: op = SRT_Union; break; + case TK_ALL: op = SRT_Table; break; + } + p->pPrior = 0; + p->pOrderBy = 0; + p->disallowOrderBy = pOrderBy!=0; + pLimit = p->pLimit; + p->pLimit = 0; + pOffset = p->pOffset; + p->pOffset = 0; + rc = sqlite3Select(pParse, p, op, unionTab, 0, 0, 0, aff); + /* Query flattening in sqlite3Select() might refill p->pOrderBy. + ** Be sure to delete p->pOrderBy, therefore, to avoid a memory leak. */ + sqlite3ExprListDelete(p->pOrderBy); + p->pPrior = pPrior; + p->pOrderBy = pOrderBy; + sqlite3ExprDelete(p->pLimit); + p->pLimit = pLimit; + p->pOffset = pOffset; + p->iLimit = -1; + p->iOffset = -1; + if( rc ){ + goto multi_select_end; + } + + + /* Convert the data in the temporary table into whatever form + ** it is that we currently need. + */ + if( eDest!=priorOp || unionTab!=iParm ){ + int iCont, iBreak, iStart; + assert( p->pEList ); + if( eDest==SRT_Callback ){ + Select *pFirst = p; + while( pFirst->pPrior ) pFirst = pFirst->pPrior; + generateColumnNames(pParse, 0, pFirst->pEList); + } + iBreak = sqlite3VdbeMakeLabel(v); + iCont = sqlite3VdbeMakeLabel(v); + computeLimitRegisters(pParse, p, iBreak); + sqlite3VdbeAddOp(v, OP_Rewind, unionTab, iBreak); + iStart = sqlite3VdbeCurrentAddr(v); + rc = selectInnerLoop(pParse, p, p->pEList, unionTab, p->pEList->nExpr, + pOrderBy, -1, eDest, iParm, + iCont, iBreak, 0); + if( rc ){ + rc = 1; + goto multi_select_end; + } + sqlite3VdbeResolveLabel(v, iCont); + sqlite3VdbeAddOp(v, OP_Next, unionTab, iStart); + sqlite3VdbeResolveLabel(v, iBreak); + sqlite3VdbeAddOp(v, OP_Close, unionTab, 0); + } + break; + } + case TK_INTERSECT: { + int tab1, tab2; + int iCont, iBreak, iStart; + Expr *pLimit, *pOffset; + int addr; + + /* INTERSECT is different from the others since it requires + ** two temporary tables. Hence it has its own case. Begin + ** by allocating the tables we will need. + */ + tab1 = pParse->nTab++; + tab2 = pParse->nTab++; + if( pOrderBy && matchOrderbyToColumn(pParse,p,pOrderBy,tab1,1) ){ + rc = 1; + goto multi_select_end; + } + createSortingIndex(pParse, p, pOrderBy); + + addr = sqlite3VdbeAddOp(v, OP_OpenEphemeral, tab1, 0); + assert( p->addrOpenEphm[0] == -1 ); + p->addrOpenEphm[0] = addr; + p->pRightmost->usesEphm = 1; + assert( p->pEList ); + + /* Code the SELECTs to our left into temporary table "tab1". 
+ */ + rc = sqlite3Select(pParse, pPrior, SRT_Union, tab1, 0, 0, 0, aff); + if( rc ){ + goto multi_select_end; + } + + /* Code the current SELECT into temporary table "tab2" + */ + addr = sqlite3VdbeAddOp(v, OP_OpenEphemeral, tab2, 0); + assert( p->addrOpenEphm[1] == -1 ); + p->addrOpenEphm[1] = addr; + p->pPrior = 0; + pLimit = p->pLimit; + p->pLimit = 0; + pOffset = p->pOffset; + p->pOffset = 0; + rc = sqlite3Select(pParse, p, SRT_Union, tab2, 0, 0, 0, aff); + p->pPrior = pPrior; + sqlite3ExprDelete(p->pLimit); + p->pLimit = pLimit; + p->pOffset = pOffset; + if( rc ){ + goto multi_select_end; + } + + /* Generate code to take the intersection of the two temporary + ** tables. + */ + assert( p->pEList ); + if( eDest==SRT_Callback ){ + Select *pFirst = p; + while( pFirst->pPrior ) pFirst = pFirst->pPrior; + generateColumnNames(pParse, 0, pFirst->pEList); + } + iBreak = sqlite3VdbeMakeLabel(v); + iCont = sqlite3VdbeMakeLabel(v); + computeLimitRegisters(pParse, p, iBreak); + sqlite3VdbeAddOp(v, OP_Rewind, tab1, iBreak); + iStart = sqlite3VdbeAddOp(v, OP_RowKey, tab1, 0); + sqlite3VdbeAddOp(v, OP_NotFound, tab2, iCont); + rc = selectInnerLoop(pParse, p, p->pEList, tab1, p->pEList->nExpr, + pOrderBy, -1, eDest, iParm, + iCont, iBreak, 0); + if( rc ){ + rc = 1; + goto multi_select_end; + } + sqlite3VdbeResolveLabel(v, iCont); + sqlite3VdbeAddOp(v, OP_Next, tab1, iStart); + sqlite3VdbeResolveLabel(v, iBreak); + sqlite3VdbeAddOp(v, OP_Close, tab2, 0); + sqlite3VdbeAddOp(v, OP_Close, tab1, 0); + break; + } + } + + /* Make sure all SELECTs in the statement have the same number of elements + ** in their result sets. + */ + assert( p->pEList && pPrior->pEList ); + if( p->pEList->nExpr!=pPrior->pEList->nExpr ){ + sqlite3ErrorMsg(pParse, "SELECTs to the left and right of %s" + " do not have the same number of result columns", selectOpName(p->op)); + rc = 1; + goto multi_select_end; + } + + /* Set the number of columns in temporary tables + */ + nCol = p->pEList->nExpr; + while( nSetP2 ){ + sqlite3VdbeChangeP2(v, aSetP2[--nSetP2], nCol); + } + + /* Compute collating sequences used by either the ORDER BY clause or + ** by any temporary tables needed to implement the compound select. + ** Attach the KeyInfo structure to all temporary tables. Invoke the + ** ORDER BY processing if there is an ORDER BY clause. + ** + ** This section is run by the right-most SELECT statement only. + ** SELECT statements to the left always skip this part. The right-most + ** SELECT might also skip this part if it has no ORDER BY clause and + ** no temp tables are required. + */ + if( pOrderBy || p->usesEphm ){ + int i; /* Loop counter */ + KeyInfo *pKeyInfo; /* Collating sequence for the result set */ + Select *pLoop; /* For looping through SELECT statements */ + int nKeyCol; /* Number of entries in pKeyInfo->aCol[] */ + CollSeq **apColl; /* For looping through pKeyInfo->aColl[] */ + CollSeq **aCopy; /* A copy of pKeyInfo->aColl[] */ + + assert( p->pRightmost==p ); + nKeyCol = nCol + (pOrderBy ? pOrderBy->nExpr : 0); + pKeyInfo = sqlite3DbMallocZero(pParse->db, + sizeof(*pKeyInfo)+nKeyCol*(sizeof(CollSeq*) + 1)); + if( !pKeyInfo ){ + rc = SQLITE_NOMEM; + goto multi_select_end; + } + + pKeyInfo->enc = ENC(pParse->db); + pKeyInfo->nField = nCol; + + for(i=0, apColl=pKeyInfo->aColl; idb->pDfltColl; + } + } + + for(pLoop=p; pLoop; pLoop=pLoop->pPrior){ + for(i=0; i<2; i++){ + int addr = pLoop->addrOpenEphm[i]; + if( addr<0 ){ + /* If [0] is unused then [1] is also unused. 
So we can + ** always safely abort as soon as the first unused slot is found */ + assert( pLoop->addrOpenEphm[1]<0 ); + break; + } + sqlite3VdbeChangeP2(v, addr, nCol); + sqlite3VdbeChangeP3(v, addr, (char*)pKeyInfo, P3_KEYINFO); + pLoop->addrOpenEphm[i] = -1; + } + } + + if( pOrderBy ){ + struct ExprList_item *pOTerm = pOrderBy->a; + int nOrderByExpr = pOrderBy->nExpr; + int addr; + u8 *pSortOrder; + + /* Reuse the same pKeyInfo for the ORDER BY as was used above for + ** the compound select statements. Except we have to change out the + ** pKeyInfo->aColl[] values. Some of the aColl[] values will be + ** reused when constructing the pKeyInfo for the ORDER BY, so make + ** a copy. Sufficient space to hold both the nCol entries for + ** the compound select and the nOrderbyExpr entries for the ORDER BY + ** was allocated above. But we need to move the compound select + ** entries out of the way before constructing the ORDER BY entries. + ** Move the compound select entries into aCopy[] where they can be + ** accessed and reused when constructing the ORDER BY entries. + ** Because nCol might be greater than or less than nOrderByExpr + ** we have to use memmove() when doing the copy. + */ + aCopy = &pKeyInfo->aColl[nOrderByExpr]; + pSortOrder = pKeyInfo->aSortOrder = (u8*)&aCopy[nCol]; + memmove(aCopy, pKeyInfo->aColl, nCol*sizeof(CollSeq*)); + + apColl = pKeyInfo->aColl; + for(i=0; ipExpr; + if( (pExpr->flags & EP_ExpCollate) ){ + assert( pExpr->pColl!=0 ); + *apColl = pExpr->pColl; + }else{ + *apColl = aCopy[pExpr->iColumn]; + } + *pSortOrder = pOTerm->sortOrder; + } + assert( p->pRightmost==p ); + assert( p->addrOpenEphm[2]>=0 ); + addr = p->addrOpenEphm[2]; + sqlite3VdbeChangeP2(v, addr, p->pOrderBy->nExpr+2); + pKeyInfo->nField = nOrderByExpr; + sqlite3VdbeChangeP3(v, addr, (char*)pKeyInfo, P3_KEYINFO_HANDOFF); + pKeyInfo = 0; + generateSortTail(pParse, p, v, p->pEList->nExpr, eDest, iParm); + } + + sqlite3_free(pKeyInfo); + } + +multi_select_end: + return rc; +} +#endif /* SQLITE_OMIT_COMPOUND_SELECT */ + +#ifndef SQLITE_OMIT_VIEW +/* Forward Declarations */ +static void substExprList(sqlite3*, ExprList*, int, ExprList*); +static void substSelect(sqlite3*, Select *, int, ExprList *); + +/* +** Scan through the expression pExpr. Replace every reference to +** a column in table number iTable with a copy of the iColumn-th +** entry in pEList. (But leave references to the ROWID column +** unchanged.) +** +** This routine is part of the flattening procedure. A subquery +** whose result set is defined by pEList appears as entry in the +** FROM clause of a SELECT such that the VDBE cursor assigned to that +** FORM clause entry is iTable. This routine make the necessary +** changes to pExpr so that it refers directly to the source table +** of the subquery rather the result set of the subquery. 
+*/ +static void substExpr( + sqlite3 *db, /* Report malloc errors to this connection */ + Expr *pExpr, /* Expr in which substitution occurs */ + int iTable, /* Table to be substituted */ + ExprList *pEList /* Substitute expressions */ +){ + if( pExpr==0 ) return; + if( pExpr->op==TK_COLUMN && pExpr->iTable==iTable ){ + if( pExpr->iColumn<0 ){ + pExpr->op = TK_NULL; + }else{ + Expr *pNew; + assert( pEList!=0 && pExpr->iColumnnExpr ); + assert( pExpr->pLeft==0 && pExpr->pRight==0 && pExpr->pList==0 ); + pNew = pEList->a[pExpr->iColumn].pExpr; + assert( pNew!=0 ); + pExpr->op = pNew->op; + assert( pExpr->pLeft==0 ); + pExpr->pLeft = sqlite3ExprDup(db, pNew->pLeft); + assert( pExpr->pRight==0 ); + pExpr->pRight = sqlite3ExprDup(db, pNew->pRight); + assert( pExpr->pList==0 ); + pExpr->pList = sqlite3ExprListDup(db, pNew->pList); + pExpr->iTable = pNew->iTable; + pExpr->pTab = pNew->pTab; + pExpr->iColumn = pNew->iColumn; + pExpr->iAgg = pNew->iAgg; + sqlite3TokenCopy(db, &pExpr->token, &pNew->token); + sqlite3TokenCopy(db, &pExpr->span, &pNew->span); + pExpr->pSelect = sqlite3SelectDup(db, pNew->pSelect); + pExpr->flags = pNew->flags; + } + }else{ + substExpr(db, pExpr->pLeft, iTable, pEList); + substExpr(db, pExpr->pRight, iTable, pEList); + substSelect(db, pExpr->pSelect, iTable, pEList); + substExprList(db, pExpr->pList, iTable, pEList); + } +} +static void substExprList( + sqlite3 *db, /* Report malloc errors here */ + ExprList *pList, /* List to scan and in which to make substitutes */ + int iTable, /* Table to be substituted */ + ExprList *pEList /* Substitute values */ +){ + int i; + if( pList==0 ) return; + for(i=0; inExpr; i++){ + substExpr(db, pList->a[i].pExpr, iTable, pEList); + } +} +static void substSelect( + sqlite3 *db, /* Report malloc errors here */ + Select *p, /* SELECT statement in which to make substitutions */ + int iTable, /* Table to be replaced */ + ExprList *pEList /* Substitute values */ +){ + if( !p ) return; + substExprList(db, p->pEList, iTable, pEList); + substExprList(db, p->pGroupBy, iTable, pEList); + substExprList(db, p->pOrderBy, iTable, pEList); + substExpr(db, p->pHaving, iTable, pEList); + substExpr(db, p->pWhere, iTable, pEList); + substSelect(db, p->pPrior, iTable, pEList); +} +#endif /* !defined(SQLITE_OMIT_VIEW) */ + +#ifndef SQLITE_OMIT_VIEW +/* +** This routine attempts to flatten subqueries in order to speed +** execution. It returns 1 if it makes changes and 0 if no flattening +** occurs. +** +** To understand the concept of flattening, consider the following +** query: +** +** SELECT a FROM (SELECT x+y AS a FROM t1 WHERE z<100) WHERE a>5 +** +** The default way of implementing this query is to execute the +** subquery first and store the results in a temporary table, then +** run the outer query on that temporary table. This requires two +** passes over the data. Furthermore, because the temporary table +** has no indices, the WHERE clause on the outer query cannot be +** optimized. +** +** This routine attempts to rewrite queries such as the above into +** a single flat select, like this: +** +** SELECT x+y AS a FROM t1 WHERE z<100 AND a>5 +** +** The code generated for this simpification gives the same result +** but only has to scan the data once. And because indices might +** exist on the table t1, a complete scan of the data might be +** avoided. +** +** Flattening is only attempted if all of the following are true: +** +** (1) The subquery and the outer query do not both use aggregates. 
+** +** (2) The subquery is not an aggregate or the outer query is not a join. +** +** (3) The subquery is not the right operand of a left outer join, or +** the subquery is not itself a join. (Ticket #306) +** +** (4) The subquery is not DISTINCT or the outer query is not a join. +** +** (5) The subquery is not DISTINCT or the outer query does not use +** aggregates. +** +** (6) The subquery does not use aggregates or the outer query is not +** DISTINCT. +** +** (7) The subquery has a FROM clause. +** +** (8) The subquery does not use LIMIT or the outer query is not a join. +** +** (9) The subquery does not use LIMIT or the outer query does not use +** aggregates. +** +** (10) The subquery does not use aggregates or the outer query does not +** use LIMIT. +** +** (11) The subquery and the outer query do not both have ORDER BY clauses. +** +** (12) The subquery is not the right term of a LEFT OUTER JOIN or the +** subquery has no WHERE clause. (added by ticket #350) +** +** (13) The subquery and outer query do not both use LIMIT +** +** (14) The subquery does not use OFFSET +** +** (15) The outer query is not part of a compound select or the +** subquery does not have both an ORDER BY and a LIMIT clause. +** (See ticket #2339) +** +** In this routine, the "p" parameter is a pointer to the outer query. +** The subquery is p->pSrc->a[iFrom]. isAgg is true if the outer query +** uses aggregates and subqueryIsAgg is true if the subquery uses aggregates. +** +** If flattening is not attempted, this routine is a no-op and returns 0. +** If flattening is attempted this routine returns 1. +** +** All of the expression analysis must occur on both the outer query and +** the subquery before this routine runs. +*/ +static int flattenSubquery( + sqlite3 *db, /* Database connection */ + Select *p, /* The parent or outer SELECT statement */ + int iFrom, /* Index in p->pSrc->a[] of the inner subquery */ + int isAgg, /* True if outer SELECT uses aggregate functions */ + int subqueryIsAgg /* True if the subquery uses aggregate functions */ +){ + Select *pSub; /* The inner query or "subquery" */ + SrcList *pSrc; /* The FROM clause of the outer query */ + SrcList *pSubSrc; /* The FROM clause of the subquery */ + ExprList *pList; /* The result set of the outer query */ + int iParent; /* VDBE cursor number of the pSub result set temp table */ + int i; /* Loop counter */ + Expr *pWhere; /* The WHERE clause */ + struct SrcList_item *pSubitem; /* The subquery */ + + /* Check to see if flattening is permitted. Return 0 if not. + */ + if( p==0 ) return 0; + pSrc = p->pSrc; + assert( pSrc && iFrom>=0 && iFromnSrc ); + pSubitem = &pSrc->a[iFrom]; + pSub = pSubitem->pSelect; + assert( pSub!=0 ); + if( isAgg && subqueryIsAgg ) return 0; /* Restriction (1) */ + if( subqueryIsAgg && pSrc->nSrc>1 ) return 0; /* Restriction (2) */ + pSubSrc = pSub->pSrc; + assert( pSubSrc ); + /* Prior to version 3.1.2, when LIMIT and OFFSET had to be simple constants, + ** not arbitrary expresssions, we allowed some combining of LIMIT and OFFSET + ** because they could be computed at compile-time. But when LIMIT and OFFSET + ** became arbitrary expressions, we were forced to add restrictions (13) + ** and (14). 
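+  **
+  ** For example (illustrative query only), the subquery in
+  **
+  **     SELECT x FROM (SELECT x FROM t1 LIMIT 10 OFFSET 2) LIMIT 5
+  **
+  ** is not flattened: the inner LIMIT combined with the outer LIMIT runs
+  ** afoul of restriction (13), and the inner OFFSET runs afoul of (14).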
*/
+  if( pSub->pLimit && p->pLimit ) return 0;              /* Restriction (13) */
+  if( pSub->pOffset ) return 0;                          /* Restriction (14) */
+  if( p->pRightmost && pSub->pLimit && pSub->pOrderBy ){
+    return 0;                                            /* Restriction (15) */
+  }
+  if( pSubSrc->nSrc==0 ) return 0;                       /* Restriction (7)  */
+  if( (pSub->isDistinct || pSub->pLimit)
+         && (pSrc->nSrc>1 || isAgg) ){          /* Restrictions (4)(5)(8)(9) */
+    return 0;
+  }
+  if( p->isDistinct && subqueryIsAgg ) return 0;         /* Restriction (6)  */
+  if( (p->disallowOrderBy || p->pOrderBy) && pSub->pOrderBy ){
+    return 0;                                            /* Restriction (11) */
+  }
+
+  /* Restriction 3:  If the subquery is a join, make sure the subquery is
+  ** not used as the right operand of an outer join.  Examples of why this
+  ** is not allowed:
+  **
+  **         t1 LEFT OUTER JOIN (t2 JOIN t3)
+  **
+  ** If we flatten the above, we would get
+  **
+  **         (t1 LEFT OUTER JOIN t2) JOIN t3
+  **
+  ** which is not at all the same thing.
+  */
+  if( pSubSrc->nSrc>1 && (pSubitem->jointype & JT_OUTER)!=0 ){
+    return 0;
+  }
+
+  /* Restriction 12:  If the subquery is the right operand of a left outer
+  ** join, make sure the subquery has no WHERE clause.
+  ** An example of why this is not allowed:
+  **
+  **         t1 LEFT OUTER JOIN (SELECT * FROM t2 WHERE t2.x>0)
+  **
+  ** If we flatten the above, we would get
+  **
+  **         (t1 LEFT OUTER JOIN t2) WHERE t2.x>0
+  **
+  ** But the t2.x>0 test will always fail on a NULL row of t2, which
+  ** effectively converts the OUTER JOIN into an INNER JOIN.
+  */
+  if( (pSubitem->jointype & JT_OUTER)!=0 && pSub->pWhere!=0 ){
+    return 0;
+  }
+
+  /* If we reach this point, it means flattening is permitted for the
+  ** iFrom-th entry of the FROM clause in the outer query.
+  */
+
+  /* Move all of the FROM elements of the subquery into the
+  ** FROM clause of the outer query.  Before doing this, remember
+  ** the cursor number for the original outer query FROM element in
+  ** iParent.  The iParent cursor will never be used.  Subsequent code
+  ** will scan expressions looking for iParent references and replace
+  ** those references with expressions that resolve to the subquery FROM
+  ** elements we are now copying in.
+  */
+  iParent = pSubitem->iCursor;
+  {
+    int nSubSrc = pSubSrc->nSrc;
+    int jointype = pSubitem->jointype;
+
+    sqlite3DeleteTable(pSubitem->pTab);
+    sqlite3_free(pSubitem->zDatabase);
+    sqlite3_free(pSubitem->zName);
+    sqlite3_free(pSubitem->zAlias);
+    if( nSubSrc>1 ){
+      int extra = nSubSrc - 1;
+      for(i=1; i<nSubSrc; i++){
+        pSrc = sqlite3SrcListAppend(db, pSrc, 0, 0);
+        if( pSrc==0 ){
+          p->pSrc = 0;
+          return 1;
+        }
+      }
+      p->pSrc = pSrc;
+      for(i=pSrc->nSrc-1; i-extra>=iFrom; i--){
+        pSrc->a[i] = pSrc->a[i-extra];
+      }
+    }
+    for(i=0; i<nSubSrc; i++){
+      pSrc->a[i+iFrom] = pSubSrc->a[i];
+      memset(&pSubSrc->a[i], 0, sizeof(pSubSrc->a[i]));
+    }
+    pSrc->a[iFrom].jointype = jointype;
+  }
+
+  /* Now begin substituting subquery result set expressions for
+  ** references to the iParent in the outer query.
+  **
+  ** Example:
+  **
+  **   SELECT a+5, b*10 FROM (SELECT x*3 AS a, y+10 AS b FROM t1) WHERE a>b;
+  **   \                     \_____________ subquery __________/          /
+  **    \_____________________ outer query ______________________________/
+  **
+  ** We look at every expression in the outer query and every place we see
+  ** "a" we substitute "x*3" and every place we see "b" we substitute "y+10".
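+  **
+  ** The flattened statement is then roughly equivalent to:
+  **
+  **     SELECT x*3+5, (y+10)*10 FROM t1 WHERE x*3>y+10;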
+  */
+  pList = p->pEList;
+  for(i=0; i<pList->nExpr; i++){
+    Expr *pExpr;
+    if( pList->a[i].zName==0 && (pExpr = pList->a[i].pExpr)->span.z!=0 ){
+      pList->a[i].zName =
+             sqlite3DbStrNDup(db, (char*)pExpr->span.z, pExpr->span.n);
+    }
+  }
+  substExprList(db, p->pEList, iParent, pSub->pEList);
+  if( isAgg ){
+    substExprList(db, p->pGroupBy, iParent, pSub->pEList);
+    substExpr(db, p->pHaving, iParent, pSub->pEList);
+  }
+  if( pSub->pOrderBy ){
+    assert( p->pOrderBy==0 );
+    p->pOrderBy = pSub->pOrderBy;
+    pSub->pOrderBy = 0;
+  }else if( p->pOrderBy ){
+    substExprList(db, p->pOrderBy, iParent, pSub->pEList);
+  }
+  if( pSub->pWhere ){
+    pWhere = sqlite3ExprDup(db, pSub->pWhere);
+  }else{
+    pWhere = 0;
+  }
+  if( subqueryIsAgg ){
+    assert( p->pHaving==0 );
+    p->pHaving = p->pWhere;
+    p->pWhere = pWhere;
+    substExpr(db, p->pHaving, iParent, pSub->pEList);
+    p->pHaving = sqlite3ExprAnd(db, p->pHaving,
+                                sqlite3ExprDup(db, pSub->pHaving));
+    assert( p->pGroupBy==0 );
+    p->pGroupBy = sqlite3ExprListDup(db, pSub->pGroupBy);
+  }else{
+    substExpr(db, p->pWhere, iParent, pSub->pEList);
+    p->pWhere = sqlite3ExprAnd(db, p->pWhere, pWhere);
+  }
+
+  /* The flattened query is distinct if either the inner or the
+  ** outer query is distinct.
+  */
+  p->isDistinct = p->isDistinct || pSub->isDistinct;
+
+  /*
+  ** SELECT ... FROM (SELECT ... LIMIT a OFFSET b) LIMIT x OFFSET y;
+  **
+  ** One is tempted to try to add a and b to combine the limits.  But this
+  ** does not work if either limit is negative.
+  */
+  if( pSub->pLimit ){
+    p->pLimit = pSub->pLimit;
+    pSub->pLimit = 0;
+  }
+
+  /* Finally, delete what is left of the subquery and return
+  ** success.
+  */
+  sqlite3SelectDelete(pSub);
+  return 1;
+}
+#endif /* SQLITE_OMIT_VIEW */
+
+/*
+** Analyze the SELECT statement passed in as an argument to see if it
+** is a simple min() or max() query.  If it is and this query can be
+** satisfied using a single seek to the beginning or end of an index,
+** then generate the code for this SELECT and return 1.  If this is not a
+** simple min() or max() query, then return 0.
+**
+** A simple min() or max() query looks like this:
+**
+**    SELECT min(a) FROM table;
+**    SELECT max(a) FROM table;
+**
+** The query may have only a single table in its FROM argument.  There
+** can be no GROUP BY or HAVING or WHERE clauses.  The result set must
+** be the min() or max() of a single column of the table.  The column
+** in the min() or max() function must be indexed.
+**
+** The parameters to this routine are the same as for sqlite3Select().
+** See the header comment on that routine for additional information.
+*/
+static int simpleMinMaxQuery(Parse *pParse, Select *p, int eDest, int iParm){
+  Expr *pExpr;
+  int iCol;
+  Table *pTab;
+  Index *pIdx;
+  int base;
+  Vdbe *v;
+  int seekOp;
+  ExprList *pEList, *pList, eList;
+  struct ExprList_item eListItem;
+  SrcList *pSrc;
+  int brk;
+  int iDb;
+
+  /* Check to see if this query is a simple min() or max() query.  Return
+  ** zero if it is not.
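+  **
+  ** As an illustration (hypothetical schema): given "CREATE INDEX i1 ON
+  ** t1(x)", the query "SELECT max(x) FROM t1" is satisfied by a single
+  ** OP_Last on i1, and "SELECT min(x) FROM t1" by a single seek past the
+  ** NULL entries to the smallest non-NULL value, instead of a full scan.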
+ */ + if( p->pGroupBy || p->pHaving || p->pWhere ) return 0; + pSrc = p->pSrc; + if( pSrc->nSrc!=1 ) return 0; + pEList = p->pEList; + if( pEList->nExpr!=1 ) return 0; + pExpr = pEList->a[0].pExpr; + if( pExpr->op!=TK_AGG_FUNCTION ) return 0; + pList = pExpr->pList; + if( pList==0 || pList->nExpr!=1 ) return 0; + if( pExpr->token.n!=3 ) return 0; + if( sqlite3StrNICmp((char*)pExpr->token.z,"min",3)==0 ){ + seekOp = OP_Rewind; + }else if( sqlite3StrNICmp((char*)pExpr->token.z,"max",3)==0 ){ + seekOp = OP_Last; + }else{ + return 0; + } + pExpr = pList->a[0].pExpr; + if( pExpr->op!=TK_COLUMN ) return 0; + iCol = pExpr->iColumn; + pTab = pSrc->a[0].pTab; + + /* This optimization cannot be used with virtual tables. */ + if( IsVirtual(pTab) ) return 0; + + /* If we get to here, it means the query is of the correct form. + ** Check to make sure we have an index and make pIdx point to the + ** appropriate index. If the min() or max() is on an INTEGER PRIMARY + ** key column, no index is necessary so set pIdx to NULL. If no + ** usable index is found, return 0. + */ + if( iCol<0 ){ + pIdx = 0; + }else{ + CollSeq *pColl = sqlite3ExprCollSeq(pParse, pExpr); + if( pColl==0 ) return 0; + for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + assert( pIdx->nColumn>=1 ); + if( pIdx->aiColumn[0]==iCol && + 0==sqlite3StrICmp(pIdx->azColl[0], pColl->zName) ){ + break; + } + } + if( pIdx==0 ) return 0; + } + + /* Identify column types if we will be using the callback. This + ** step is skipped if the output is going to a table or a memory cell. + ** The column names have already been generated in the calling function. + */ + v = sqlite3GetVdbe(pParse); + if( v==0 ) return 0; + + /* If the output is destined for a temporary table, open that table. + */ + if( eDest==SRT_EphemTab ){ + sqlite3VdbeAddOp(v, OP_OpenEphemeral, iParm, 1); + } + + /* Generating code to find the min or the max. Basically all we have + ** to do is find the first or the last entry in the chosen index. If + ** the min() or max() is on the INTEGER PRIMARY KEY, then find the first + ** or last entry in the main table. + */ + iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); + assert( iDb>=0 || pTab->isEphem ); + sqlite3CodeVerifySchema(pParse, iDb); + sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); + base = pSrc->a[0].iCursor; + brk = sqlite3VdbeMakeLabel(v); + computeLimitRegisters(pParse, p, brk); + if( pSrc->a[0].pSelect==0 ){ + sqlite3OpenTable(pParse, base, iDb, pTab, OP_OpenRead); + } + if( pIdx==0 ){ + sqlite3VdbeAddOp(v, seekOp, base, 0); + }else{ + /* Even though the cursor used to open the index here is closed + ** as soon as a single value has been read from it, allocate it + ** using (pParse->nTab++) to prevent the cursor id from being + ** reused. This is important for statements of the form + ** "INSERT INTO x SELECT max() FROM x". + */ + int iIdx; + KeyInfo *pKey = sqlite3IndexKeyinfo(pParse, pIdx); + iIdx = pParse->nTab++; + assert( pIdx->pSchema==pTab->pSchema ); + sqlite3VdbeAddOp(v, OP_Integer, iDb, 0); + sqlite3VdbeOp3(v, OP_OpenRead, iIdx, pIdx->tnum, + (char*)pKey, P3_KEYINFO_HANDOFF); + if( seekOp==OP_Rewind ){ + sqlite3VdbeAddOp(v, OP_Null, 0, 0); + sqlite3VdbeAddOp(v, OP_MakeRecord, 1, 0); + seekOp = OP_MoveGt; + } + if( pIdx->aSortOrder[0]==SQLITE_SO_DESC ){ + /* Ticket #2514: invert the seek operator if we are using + ** a descending index. 
*/ + if( seekOp==OP_Last ){ + seekOp = OP_Rewind; + }else{ + assert( seekOp==OP_MoveGt ); + seekOp = OP_MoveLt; + } + } + sqlite3VdbeAddOp(v, seekOp, iIdx, 0); + sqlite3VdbeAddOp(v, OP_IdxRowid, iIdx, 0); + sqlite3VdbeAddOp(v, OP_Close, iIdx, 0); + sqlite3VdbeAddOp(v, OP_MoveGe, base, 0); + } + eList.nExpr = 1; + memset(&eListItem, 0, sizeof(eListItem)); + eList.a = &eListItem; + eList.a[0].pExpr = pExpr; + selectInnerLoop(pParse, p, &eList, 0, 0, 0, -1, eDest, iParm, brk, brk, 0); + sqlite3VdbeResolveLabel(v, brk); + sqlite3VdbeAddOp(v, OP_Close, base, 0); + + return 1; +} + +/* +** Analyze and ORDER BY or GROUP BY clause in a SELECT statement. Return +** the number of errors seen. +** +** An ORDER BY or GROUP BY is a list of expressions. If any expression +** is an integer constant, then that expression is replaced by the +** corresponding entry in the result set. +*/ +static int processOrderGroupBy( + NameContext *pNC, /* Name context of the SELECT statement. */ + ExprList *pOrderBy, /* The ORDER BY or GROUP BY clause to be processed */ + const char *zType /* Either "ORDER" or "GROUP", as appropriate */ +){ + int i; + ExprList *pEList = pNC->pEList; /* The result set of the SELECT */ + Parse *pParse = pNC->pParse; /* The result set of the SELECT */ + assert( pEList ); + + if( pOrderBy==0 ) return 0; + if( pOrderBy->nExpr>SQLITE_MAX_COLUMN ){ + sqlite3ErrorMsg(pParse, "too many terms in %s BY clause", zType); + return 1; + } + for(i=0; inExpr; i++){ + int iCol; + Expr *pE = pOrderBy->a[i].pExpr; + if( sqlite3ExprIsInteger(pE, &iCol) ){ + if( iCol>0 && iCol<=pEList->nExpr ){ + CollSeq *pColl = pE->pColl; + int flags = pE->flags & EP_ExpCollate; + sqlite3ExprDelete(pE); + pE = sqlite3ExprDup(pParse->db, pEList->a[iCol-1].pExpr); + pOrderBy->a[i].pExpr = pE; + if( pColl && flags ){ + pE->pColl = pColl; + pE->flags |= flags; + } + }else{ + sqlite3ErrorMsg(pParse, + "%s BY column number %d out of range - should be " + "between 1 and %d", zType, iCol, pEList->nExpr); + return 1; + } + } + if( sqlite3ExprResolveNames(pNC, pE) ){ + return 1; + } + } + return 0; +} + +/* +** This routine resolves any names used in the result set of the +** supplied SELECT statement. If the SELECT statement being resolved +** is a sub-select, then pOuterNC is a pointer to the NameContext +** of the parent SELECT. +*/ +int sqlite3SelectResolve( + Parse *pParse, /* The parser context */ + Select *p, /* The SELECT statement being coded. */ + NameContext *pOuterNC /* The outer name context. May be NULL. */ +){ + ExprList *pEList; /* Result set. */ + int i; /* For-loop variable used in multiple places */ + NameContext sNC; /* Local name-context */ + ExprList *pGroupBy; /* The group by clause */ + + /* If this routine has run before, return immediately. */ + if( p->isResolved ){ + assert( !pOuterNC ); + return SQLITE_OK; + } + p->isResolved = 1; + + /* If there have already been errors, do nothing. */ + if( pParse->nErr>0 ){ + return SQLITE_ERROR; + } + + /* Prepare the select statement. This call will allocate all cursors + ** required to handle the tables and subqueries in the FROM clause. + */ + if( prepSelectStmt(pParse, p) ){ + return SQLITE_ERROR; + } + + /* Resolve the expressions in the LIMIT and OFFSET clauses. These + ** are not allowed to refer to any names, so pass an empty NameContext. 
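A short sketch of the substitution just described (table t3 is invented; not part of the patch): "ORDER BY 2" is rewritten to order by the second result column, and an out-of-range column number is reported when the statement is prepared, using the message format shown above.

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t3(a,b);"
                   "INSERT INTO t3 VALUES(1,'z'); INSERT INTO t3 VALUES(2,'a');", 0, 0, 0);

  /* "ORDER BY 2" becomes "ORDER BY b" after processOrderGroupBy() runs,
  ** so the rows come out as (2,'a') then (1,'z'). */
  sqlite3_prepare(db, "SELECT a, b FROM t3 ORDER BY 2", -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%d %s\n", sqlite3_column_int(pStmt, 0),
                      (const char*)sqlite3_column_text(pStmt, 1));
  }
  sqlite3_finalize(pStmt);

  /* Column number 5 is out of range for a two-column result set:
  ** "ORDER BY column number 5 out of range - should be between 1 and 2" */
  if( sqlite3_prepare(db, "SELECT a, b FROM t3 ORDER BY 5", -1, &pStmt, 0)!=SQLITE_OK ){
    printf("error: %s\n", sqlite3_errmsg(db));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}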
+ */ + memset(&sNC, 0, sizeof(sNC)); + sNC.pParse = pParse; + if( sqlite3ExprResolveNames(&sNC, p->pLimit) || + sqlite3ExprResolveNames(&sNC, p->pOffset) ){ + return SQLITE_ERROR; + } + + /* Set up the local name-context to pass to ExprResolveNames() to + ** resolve the expression-list. + */ + sNC.allowAgg = 1; + sNC.pSrcList = p->pSrc; + sNC.pNext = pOuterNC; + + /* Resolve names in the result set. */ + pEList = p->pEList; + if( !pEList ) return SQLITE_ERROR; + for(i=0; inExpr; i++){ + Expr *pX = pEList->a[i].pExpr; + if( sqlite3ExprResolveNames(&sNC, pX) ){ + return SQLITE_ERROR; + } + } + + /* If there are no aggregate functions in the result-set, and no GROUP BY + ** expression, do not allow aggregates in any of the other expressions. + */ + assert( !p->isAgg ); + pGroupBy = p->pGroupBy; + if( pGroupBy || sNC.hasAgg ){ + p->isAgg = 1; + }else{ + sNC.allowAgg = 0; + } + + /* If a HAVING clause is present, then there must be a GROUP BY clause. + */ + if( p->pHaving && !pGroupBy ){ + sqlite3ErrorMsg(pParse, "a GROUP BY clause is required before HAVING"); + return SQLITE_ERROR; + } + + /* Add the expression list to the name-context before parsing the + ** other expressions in the SELECT statement. This is so that + ** expressions in the WHERE clause (etc.) can refer to expressions by + ** aliases in the result set. + ** + ** Minor point: If this is the case, then the expression will be + ** re-evaluated for each reference to it. + */ + sNC.pEList = p->pEList; + if( sqlite3ExprResolveNames(&sNC, p->pWhere) || + sqlite3ExprResolveNames(&sNC, p->pHaving) ){ + return SQLITE_ERROR; + } + if( p->pPrior==0 ){ + if( processOrderGroupBy(&sNC, p->pOrderBy, "ORDER") || + processOrderGroupBy(&sNC, pGroupBy, "GROUP") ){ + return SQLITE_ERROR; + } + } + + if( pParse->db->mallocFailed ){ + return SQLITE_NOMEM; + } + + /* Make sure the GROUP BY clause does not contain aggregate functions. + */ + if( pGroupBy ){ + struct ExprList_item *pItem; + + for(i=0, pItem=pGroupBy->a; inExpr; i++, pItem++){ + if( ExprHasProperty(pItem->pExpr, EP_Agg) ){ + sqlite3ErrorMsg(pParse, "aggregate functions are not allowed in " + "the GROUP BY clause"); + return SQLITE_ERROR; + } + } + } + + /* If this is one SELECT of a compound, be sure to resolve names + ** in the other SELECTs. + */ + if( p->pPrior ){ + return sqlite3SelectResolve(pParse, p->pPrior, pOuterNC); + }else{ + return SQLITE_OK; + } +} + +/* +** Reset the aggregate accumulator. +** +** The aggregate accumulator is a set of memory cells that hold +** intermediate results while calculating an aggregate. This +** routine simply stores NULLs in all of those memory cells. +*/ +static void resetAccumulator(Parse *pParse, AggInfo *pAggInfo){ + Vdbe *v = pParse->pVdbe; + int i; + struct AggInfo_func *pFunc; + if( pAggInfo->nFunc+pAggInfo->nColumn==0 ){ + return; + } + for(i=0; inColumn; i++){ + sqlite3VdbeAddOp(v, OP_MemNull, pAggInfo->aCol[i].iMem, 0); + } + for(pFunc=pAggInfo->aFunc, i=0; inFunc; i++, pFunc++){ + sqlite3VdbeAddOp(v, OP_MemNull, pFunc->iMem, 0); + if( pFunc->iDistinct>=0 ){ + Expr *pE = pFunc->pExpr; + if( pE->pList==0 || pE->pList->nExpr!=1 ){ + sqlite3ErrorMsg(pParse, "DISTINCT in aggregate must be followed " + "by an expression"); + pFunc->iDistinct = -1; + }else{ + KeyInfo *pKeyInfo = keyInfoFromExprList(pParse, pE->pList); + sqlite3VdbeOp3(v, OP_OpenEphemeral, pFunc->iDistinct, 0, + (char*)pKeyInfo, P3_KEYINFO_HANDOFF); + } + } + } +} + +/* +** Invoke the OP_AggFinalize opcode for every aggregate function +** in the AggInfo structure. 
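Two consequences of these resolution rules, shown as a sketch against an invented table t4 (not part of the patch): a result-set alias may be referenced from the WHERE clause, where it is simply re-evaluated, while HAVING with no GROUP BY is rejected at prepare time with the message used above.

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t4(x);"
                   "INSERT INTO t4 VALUES(4); INSERT INTO t4 VALUES(9);", 0, 0, 0);

  /* The alias "a" from the result set is visible inside WHERE. */
  sqlite3_prepare(db, "SELECT x*3 AS a FROM t4 WHERE a>20", -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("a = %d\n", sqlite3_column_int(pStmt, 0));   /* prints a = 27 */
  }
  sqlite3_finalize(pStmt);

  /* HAVING without GROUP BY fails the check above:
  ** "a GROUP BY clause is required before HAVING" */
  if( sqlite3_prepare(db, "SELECT x FROM t4 HAVING x>1", -1, &pStmt, 0)!=SQLITE_OK ){
    printf("%s\n", sqlite3_errmsg(db));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}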
+*/ +static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){ + Vdbe *v = pParse->pVdbe; + int i; + struct AggInfo_func *pF; + for(i=0, pF=pAggInfo->aFunc; inFunc; i++, pF++){ + ExprList *pList = pF->pExpr->pList; + sqlite3VdbeOp3(v, OP_AggFinal, pF->iMem, pList ? pList->nExpr : 0, + (void*)pF->pFunc, P3_FUNCDEF); + } +} + +/* +** Update the accumulator memory cells for an aggregate based on +** the current cursor position. +*/ +static void updateAccumulator(Parse *pParse, AggInfo *pAggInfo){ + Vdbe *v = pParse->pVdbe; + int i; + struct AggInfo_func *pF; + struct AggInfo_col *pC; + + pAggInfo->directMode = 1; + for(i=0, pF=pAggInfo->aFunc; inFunc; i++, pF++){ + int nArg; + int addrNext = 0; + ExprList *pList = pF->pExpr->pList; + if( pList ){ + nArg = pList->nExpr; + sqlite3ExprCodeExprList(pParse, pList); + }else{ + nArg = 0; + } + if( pF->iDistinct>=0 ){ + addrNext = sqlite3VdbeMakeLabel(v); + assert( nArg==1 ); + codeDistinct(v, pF->iDistinct, addrNext, 1); + } + if( pF->pFunc->needCollSeq ){ + CollSeq *pColl = 0; + struct ExprList_item *pItem; + int j; + assert( pList!=0 ); /* pList!=0 if pF->pFunc->needCollSeq is true */ + for(j=0, pItem=pList->a; !pColl && jpExpr); + } + if( !pColl ){ + pColl = pParse->db->pDfltColl; + } + sqlite3VdbeOp3(v, OP_CollSeq, 0, 0, (char *)pColl, P3_COLLSEQ); + } + sqlite3VdbeOp3(v, OP_AggStep, pF->iMem, nArg, (void*)pF->pFunc, P3_FUNCDEF); + if( addrNext ){ + sqlite3VdbeResolveLabel(v, addrNext); + } + } + for(i=0, pC=pAggInfo->aCol; inAccumulator; i++, pC++){ + sqlite3ExprCode(pParse, pC->pExpr); + sqlite3VdbeAddOp(v, OP_MemStore, pC->iMem, 1); + } + pAggInfo->directMode = 0; +} + + +/* +** Generate code for the given SELECT statement. +** +** The results are distributed in various ways depending on the +** value of eDest and iParm. +** +** eDest Value Result +** ------------ ------------------------------------------- +** SRT_Callback Invoke the callback for each row of the result. +** +** SRT_Mem Store first result in memory cell iParm +** +** SRT_Set Store results as keys of table iParm. +** +** SRT_Union Store results as a key in a temporary table iParm +** +** SRT_Except Remove results from the temporary table iParm. +** +** SRT_Table Store results in temporary table iParm +** +** The table above is incomplete. Additional eDist value have be added +** since this comment was written. See the selectInnerLoop() function for +** a complete listing of the allowed values of eDest and their meanings. +** +** This routine returns the number of errors. If any errors are +** encountered, then an appropriate error message is left in +** pParse->zErrMsg. +** +** This routine does NOT free the Select structure passed in. The +** calling function needs to do that. +** +** The pParent, parentTab, and *pParentAgg fields are filled in if this +** SELECT is a subquery. This routine may try to combine this SELECT +** with its parent to form a single flat query. In so doing, it might +** change the parent query from a non-aggregate to an aggregate query. +** For that reason, the pParentAgg flag is passed as a pointer, so it +** can be changed. +** +** Example 1: The meaning of the pParent parameter. +** +** SELECT * FROM t1 JOIN (SELECT x, count(*) FROM t2) JOIN t3; +** \ \_______ subquery _______/ / +** \ / +** \____________________ outer query ___________________/ +** +** This routine is called for the outer query first. For that call, +** pParent will be NULL. 
During the processing of the outer query, this +** routine is called recursively to handle the subquery. For the recursive +** call, pParent will point to the outer query. Because the subquery is +** the second element in a three-way join, the parentTab parameter will +** be 1 (the 2nd value of a 0-indexed array.) +*/ +int sqlite3Select( + Parse *pParse, /* The parser context */ + Select *p, /* The SELECT statement being coded. */ + int eDest, /* How to dispose of the results */ + int iParm, /* A parameter used by the eDest disposal method */ + Select *pParent, /* Another SELECT for which this is a sub-query */ + int parentTab, /* Index in pParent->pSrc of this query */ + int *pParentAgg, /* True if pParent uses aggregate functions */ + char *aff /* If eDest is SRT_Union, the affinity string */ +){ + int i, j; /* Loop counters */ + WhereInfo *pWInfo; /* Return from sqlite3WhereBegin() */ + Vdbe *v; /* The virtual machine under construction */ + int isAgg; /* True for select lists like "count(*)" */ + ExprList *pEList; /* List of columns to extract. */ + SrcList *pTabList; /* List of tables to select from */ + Expr *pWhere; /* The WHERE clause. May be NULL */ + ExprList *pOrderBy; /* The ORDER BY clause. May be NULL */ + ExprList *pGroupBy; /* The GROUP BY clause. May be NULL */ + Expr *pHaving; /* The HAVING clause. May be NULL */ + int isDistinct; /* True if the DISTINCT keyword is present */ + int distinct; /* Table to use for the distinct set */ + int rc = 1; /* Value to return from this function */ + int addrSortIndex; /* Address of an OP_OpenEphemeral instruction */ + AggInfo sAggInfo; /* Information used by aggregate queries */ + int iEnd; /* Address of the end of the query */ + sqlite3 *db; /* The database connection */ + + db = pParse->db; + if( p==0 || db->mallocFailed || pParse->nErr ){ + return 1; + } + if( sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0) ) return 1; + memset(&sAggInfo, 0, sizeof(sAggInfo)); + +#ifndef SQLITE_OMIT_COMPOUND_SELECT + /* If there is are a sequence of queries, do the earlier ones first. + */ + if( p->pPrior ){ + if( p->pRightmost==0 ){ + Select *pLoop; + int cnt = 0; + for(pLoop=p; pLoop; pLoop=pLoop->pPrior, cnt++){ + pLoop->pRightmost = p; + } + if( SQLITE_MAX_COMPOUND_SELECT>0 && cnt>SQLITE_MAX_COMPOUND_SELECT ){ + sqlite3ErrorMsg(pParse, "too many terms in compound SELECT"); + return 1; + } + } + return multiSelect(pParse, p, eDest, iParm, aff); + } +#endif + + pOrderBy = p->pOrderBy; + if( IgnorableOrderby(eDest) ){ + p->pOrderBy = 0; + } + if( sqlite3SelectResolve(pParse, p, 0) ){ + goto select_end; + } + p->pOrderBy = pOrderBy; + + /* Make local copies of the parameters for this query. + */ + pTabList = p->pSrc; + pWhere = p->pWhere; + pGroupBy = p->pGroupBy; + pHaving = p->pHaving; + isAgg = p->isAgg; + isDistinct = p->isDistinct; + pEList = p->pEList; + if( pEList==0 ) goto select_end; + + /* + ** Do not even attempt to generate any code if we have already seen + ** errors before this routine starts. + */ + if( pParse->nErr>0 ) goto select_end; + + /* If writing to memory or generating a set + ** only a single column may be output. + */ +#ifndef SQLITE_OMIT_SUBQUERY + if( checkForMultiColumnSelectError(pParse, eDest, pEList->nExpr) ){ + goto select_end; + } +#endif + + /* ORDER BY is ignored for some destinations. + */ + if( IgnorableOrderby(eDest) ){ + pOrderBy = 0; + } + + /* Begin generating code. 
+ */ + v = sqlite3GetVdbe(pParse); + if( v==0 ) goto select_end; + + /* Generate code for all sub-queries in the FROM clause + */ +#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) + for(i=0; inSrc; i++){ + const char *zSavedAuthContext = 0; + int needRestoreContext; + struct SrcList_item *pItem = &pTabList->a[i]; + + if( pItem->pSelect==0 || pItem->isPopulated ) continue; + if( pItem->zName!=0 ){ + zSavedAuthContext = pParse->zAuthContext; + pParse->zAuthContext = pItem->zName; + needRestoreContext = 1; + }else{ + needRestoreContext = 0; + } +#if defined(SQLITE_TEST) || SQLITE_MAX_EXPR_DEPTH>0 + /* Increment Parse.nHeight by the height of the largest expression + ** tree refered to by this, the parent select. The child select + ** may contain expression trees of at most + ** (SQLITE_MAX_EXPR_DEPTH-Parse.nHeight) height. This is a bit + ** more conservative than necessary, but much easier than enforcing + ** an exact limit. + */ + pParse->nHeight += sqlite3SelectExprHeight(p); +#endif + sqlite3Select(pParse, pItem->pSelect, SRT_EphemTab, + pItem->iCursor, p, i, &isAgg, 0); +#if defined(SQLITE_TEST) || SQLITE_MAX_EXPR_DEPTH>0 + pParse->nHeight -= sqlite3SelectExprHeight(p); +#endif + if( needRestoreContext ){ + pParse->zAuthContext = zSavedAuthContext; + } + pTabList = p->pSrc; + pWhere = p->pWhere; + if( !IgnorableOrderby(eDest) ){ + pOrderBy = p->pOrderBy; + } + pGroupBy = p->pGroupBy; + pHaving = p->pHaving; + isDistinct = p->isDistinct; + } +#endif + + /* Check for the special case of a min() or max() function by itself + ** in the result set. + */ + if( simpleMinMaxQuery(pParse, p, eDest, iParm) ){ + rc = 0; + goto select_end; + } + + /* Check to see if this is a subquery that can be "flattened" into its parent. + ** If flattening is a possiblity, do so and return immediately. + */ +#ifndef SQLITE_OMIT_VIEW + if( pParent && pParentAgg && + flattenSubquery(db, pParent, parentTab, *pParentAgg, isAgg) ){ + if( isAgg ) *pParentAgg = 1; + goto select_end; + } +#endif + + /* If there is an ORDER BY clause, then this sorting + ** index might end up being unused if the data can be + ** extracted in pre-sorted order. If that is the case, then the + ** OP_OpenEphemeral instruction will be changed to an OP_Noop once + ** we figure out that the sorting index is not needed. The addrSortIndex + ** variable is used to facilitate that change. + */ + if( pOrderBy ){ + KeyInfo *pKeyInfo; + if( pParse->nErr ){ + goto select_end; + } + pKeyInfo = keyInfoFromExprList(pParse, pOrderBy); + pOrderBy->iECursor = pParse->nTab++; + p->addrOpenEphm[2] = addrSortIndex = + sqlite3VdbeOp3(v, OP_OpenEphemeral, pOrderBy->iECursor, pOrderBy->nExpr+2, (char*)pKeyInfo, P3_KEYINFO_HANDOFF); + }else{ + addrSortIndex = -1; + } + + /* If the output is destined for a temporary table, open that table. + */ + if( eDest==SRT_EphemTab ){ + sqlite3VdbeAddOp(v, OP_OpenEphemeral, iParm, pEList->nExpr); + } + + /* Set the limiter. + */ + iEnd = sqlite3VdbeMakeLabel(v); + computeLimitRegisters(pParse, p, iEnd); + + /* Open a virtual index to use for the distinct set. 
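A sketch of when that sorter is later discarded (table t5 and index t5a are invented; not part of the patch). The helper below just dumps whatever columns EXPLAIN returns, since the exact layout varies; for the first query the index already delivers rows in ORDER BY order, so one would expect the ephemeral sorter opened here to be converted to a no-op, while the second query has to keep it.

#include <stdio.h>
#include "sqlite3.h"

/* Print the VDBE program for zSql. */
static void explainQuery(sqlite3 *db, const char *zSql){
  char *zExp = sqlite3_mprintf("EXPLAIN %s", zSql);
  sqlite3_stmt *pStmt = 0;
  printf("-- %s\n", zSql);
  if( zExp && sqlite3_prepare(db, zExp, -1, &pStmt, 0)==SQLITE_OK ){
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      int i;
      for(i=0; i<sqlite3_column_count(pStmt); i++){
        const char *z = (const char*)sqlite3_column_text(pStmt, i);
        printf("%s ", z ? z : "");
      }
      printf("\n");
    }
  }
  sqlite3_finalize(pStmt);
  sqlite3_free(zExp);
}

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t5(a,b); CREATE INDEX t5a ON t5(a);", 0, 0, 0);
  explainQuery(db, "SELECT a FROM t5 ORDER BY a");  /* index order: sorter expected to become a Noop */
  explainQuery(db, "SELECT b FROM t5 ORDER BY b");  /* no index on b: the OpenEphemeral sorter stays */
  sqlite3_close(db);
  return 0;
}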
+ */ + if( isDistinct ){ + KeyInfo *pKeyInfo; + distinct = pParse->nTab++; + pKeyInfo = keyInfoFromExprList(pParse, p->pEList); + sqlite3VdbeOp3(v, OP_OpenEphemeral, distinct, 0, + (char*)pKeyInfo, P3_KEYINFO_HANDOFF); + }else{ + distinct = -1; + } + + /* Aggregate and non-aggregate queries are handled differently */ + if( !isAgg && pGroupBy==0 ){ + /* This case is for non-aggregate queries + ** Begin the database scan + */ + pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, &pOrderBy); + if( pWInfo==0 ) goto select_end; + + /* If sorting index that was created by a prior OP_OpenEphemeral + ** instruction ended up not being needed, then change the OP_OpenEphemeral + ** into an OP_Noop. + */ + if( addrSortIndex>=0 && pOrderBy==0 ){ + sqlite3VdbeChangeToNoop(v, addrSortIndex, 1); + p->addrOpenEphm[2] = -1; + } + + /* Use the standard inner loop + */ + if( selectInnerLoop(pParse, p, pEList, 0, 0, pOrderBy, distinct, eDest, + iParm, pWInfo->iContinue, pWInfo->iBreak, aff) ){ + goto select_end; + } + + /* End the database scan loop. + */ + sqlite3WhereEnd(pWInfo); + }else{ + /* This is the processing for aggregate queries */ + NameContext sNC; /* Name context for processing aggregate information */ + int iAMem; /* First Mem address for storing current GROUP BY */ + int iBMem; /* First Mem address for previous GROUP BY */ + int iUseFlag; /* Mem address holding flag indicating that at least + ** one row of the input to the aggregator has been + ** processed */ + int iAbortFlag; /* Mem address which causes query abort if positive */ + int groupBySort; /* Rows come from source in GROUP BY order */ + + + /* The following variables hold addresses or labels for parts of the + ** virtual machine program we are putting together */ + int addrOutputRow; /* Start of subroutine that outputs a result row */ + int addrSetAbort; /* Set the abort flag and return */ + int addrInitializeLoop; /* Start of code that initializes the input loop */ + int addrTopOfLoop; /* Top of the input loop */ + int addrGroupByChange; /* Code that runs when any GROUP BY term changes */ + int addrProcessRow; /* Code to process a single input row */ + int addrEnd; /* End of all processing */ + int addrSortingIdx; /* The OP_OpenEphemeral for the sorting index */ + int addrReset; /* Subroutine for resetting the accumulator */ + + addrEnd = sqlite3VdbeMakeLabel(v); + + /* Convert TK_COLUMN nodes into TK_AGG_COLUMN and make entries in + ** sAggInfo for all TK_AGG_FUNCTION nodes in expressions of the + ** SELECT statement. + */ + memset(&sNC, 0, sizeof(sNC)); + sNC.pParse = pParse; + sNC.pSrcList = pTabList; + sNC.pAggInfo = &sAggInfo; + sAggInfo.nSortingColumn = pGroupBy ? pGroupBy->nExpr+1 : 0; + sAggInfo.pGroupBy = pGroupBy; + if( sqlite3ExprAnalyzeAggList(&sNC, pEList) ){ + goto select_end; + } + if( sqlite3ExprAnalyzeAggList(&sNC, pOrderBy) ){ + goto select_end; + } + if( pHaving && sqlite3ExprAnalyzeAggregates(&sNC, pHaving) ){ + goto select_end; + } + sAggInfo.nAccumulator = sAggInfo.nColumn; + for(i=0; ipList) ){ + goto select_end; + } + } + if( db->mallocFailed ) goto select_end; + + /* Processing for aggregates with GROUP BY is very different and + ** much more complex tha aggregates without a GROUP BY. 
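Before reading that code, it may help to keep in mind what it has to produce. A sketch with an invented table t6 (not part of the patch): one result row is emitted for each distinct set of GROUP BY values, each time the GROUP BY terms change under the scan described below.

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
     "CREATE TABLE t6(grp,val);"
     "INSERT INTO t6 VALUES('a',1); INSERT INTO t6 VALUES('b',5);"
     "INSERT INTO t6 VALUES('a',3);", 0, 0, 0);

  /* Rows are visited in GROUP BY order (sorted first if no suitable index
  ** exists); when "grp" changes, the accumulators are flushed as one row.
  ** Expected output: one row for 'a' (count 2, sum 4), one for 'b' (1, 5). */
  sqlite3_prepare(db, "SELECT grp, count(*), sum(val) FROM t6 GROUP BY grp",
                  -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%s %d %d\n", (const char*)sqlite3_column_text(pStmt, 0),
           sqlite3_column_int(pStmt, 1), sqlite3_column_int(pStmt, 2));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}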
+ */ + if( pGroupBy ){ + KeyInfo *pKeyInfo; /* Keying information for the group by clause */ + + /* Create labels that we will be needing + */ + + addrInitializeLoop = sqlite3VdbeMakeLabel(v); + addrGroupByChange = sqlite3VdbeMakeLabel(v); + addrProcessRow = sqlite3VdbeMakeLabel(v); + + /* If there is a GROUP BY clause we might need a sorting index to + ** implement it. Allocate that sorting index now. If it turns out + ** that we do not need it after all, the OpenEphemeral instruction + ** will be converted into a Noop. + */ + sAggInfo.sortingIdx = pParse->nTab++; + pKeyInfo = keyInfoFromExprList(pParse, pGroupBy); + addrSortingIdx = + sqlite3VdbeOp3(v, OP_OpenEphemeral, sAggInfo.sortingIdx, + sAggInfo.nSortingColumn, + (char*)pKeyInfo, P3_KEYINFO_HANDOFF); + + /* Initialize memory locations used by GROUP BY aggregate processing + */ + iUseFlag = pParse->nMem++; + iAbortFlag = pParse->nMem++; + iAMem = pParse->nMem; + pParse->nMem += pGroupBy->nExpr; + iBMem = pParse->nMem; + pParse->nMem += pGroupBy->nExpr; + sqlite3VdbeAddOp(v, OP_MemInt, 0, iAbortFlag); + VdbeComment((v, "# clear abort flag")); + sqlite3VdbeAddOp(v, OP_MemInt, 0, iUseFlag); + VdbeComment((v, "# indicate accumulator empty")); + sqlite3VdbeAddOp(v, OP_Goto, 0, addrInitializeLoop); + + /* Generate a subroutine that outputs a single row of the result + ** set. This subroutine first looks at the iUseFlag. If iUseFlag + ** is less than or equal to zero, the subroutine is a no-op. If + ** the processing calls for the query to abort, this subroutine + ** increments the iAbortFlag memory location before returning in + ** order to signal the caller to abort. + */ + addrSetAbort = sqlite3VdbeCurrentAddr(v); + sqlite3VdbeAddOp(v, OP_MemInt, 1, iAbortFlag); + VdbeComment((v, "# set abort flag")); + sqlite3VdbeAddOp(v, OP_Return, 0, 0); + addrOutputRow = sqlite3VdbeCurrentAddr(v); + sqlite3VdbeAddOp(v, OP_IfMemPos, iUseFlag, addrOutputRow+2); + VdbeComment((v, "# Groupby result generator entry point")); + sqlite3VdbeAddOp(v, OP_Return, 0, 0); + finalizeAggFunctions(pParse, &sAggInfo); + if( pHaving ){ + sqlite3ExprIfFalse(pParse, pHaving, addrOutputRow+1, 1); + } + rc = selectInnerLoop(pParse, p, p->pEList, 0, 0, pOrderBy, + distinct, eDest, iParm, + addrOutputRow+1, addrSetAbort, aff); + if( rc ){ + goto select_end; + } + sqlite3VdbeAddOp(v, OP_Return, 0, 0); + VdbeComment((v, "# end groupby result generator")); + + /* Generate a subroutine that will reset the group-by accumulator + */ + addrReset = sqlite3VdbeCurrentAddr(v); + resetAccumulator(pParse, &sAggInfo); + sqlite3VdbeAddOp(v, OP_Return, 0, 0); + + /* Begin a loop that will extract all source rows in GROUP BY order. + ** This might involve two separate loops with an OP_Sort in between, or + ** it might be a single loop that uses an index to extract information + ** in the right order to begin with. + */ + sqlite3VdbeResolveLabel(v, addrInitializeLoop); + sqlite3VdbeAddOp(v, OP_Gosub, 0, addrReset); + pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, &pGroupBy); + if( pWInfo==0 ) goto select_end; + if( pGroupBy==0 ){ + /* The optimizer is able to deliver rows in group by order so + ** we do not have to sort. The OP_OpenEphemeral table will be + ** cancelled later because we still need to use the pKeyInfo + */ + pGroupBy = p->pGroupBy; + groupBySort = 0; + }else{ + /* Rows are coming out in undetermined order. 
We have to push + ** each row into a sorting index, terminate the first loop, + ** then loop over the sorting index in order to get the output + ** in sorted order + */ + groupBySort = 1; + sqlite3ExprCodeExprList(pParse, pGroupBy); + sqlite3VdbeAddOp(v, OP_Sequence, sAggInfo.sortingIdx, 0); + j = pGroupBy->nExpr+1; + for(i=0; iiSorterColumnpTab, pCol->iColumn, pCol->iTable); + j++; + } + sqlite3VdbeAddOp(v, OP_MakeRecord, j, 0); + sqlite3VdbeAddOp(v, OP_IdxInsert, sAggInfo.sortingIdx, 0); + sqlite3WhereEnd(pWInfo); + sqlite3VdbeAddOp(v, OP_Sort, sAggInfo.sortingIdx, addrEnd); + VdbeComment((v, "# GROUP BY sort")); + sAggInfo.useSortingIdx = 1; + } + + /* Evaluate the current GROUP BY terms and store in b0, b1, b2... + ** (b0 is memory location iBMem+0, b1 is iBMem+1, and so forth) + ** Then compare the current GROUP BY terms against the GROUP BY terms + ** from the previous row currently stored in a0, a1, a2... + */ + addrTopOfLoop = sqlite3VdbeCurrentAddr(v); + for(j=0; jnExpr; j++){ + if( groupBySort ){ + sqlite3VdbeAddOp(v, OP_Column, sAggInfo.sortingIdx, j); + }else{ + sAggInfo.directMode = 1; + sqlite3ExprCode(pParse, pGroupBy->a[j].pExpr); + } + sqlite3VdbeAddOp(v, OP_MemStore, iBMem+j, jnExpr-1); + } + for(j=pGroupBy->nExpr-1; j>=0; j--){ + if( jnExpr-1 ){ + sqlite3VdbeAddOp(v, OP_MemLoad, iBMem+j, 0); + } + sqlite3VdbeAddOp(v, OP_MemLoad, iAMem+j, 0); + if( j==0 ){ + sqlite3VdbeAddOp(v, OP_Eq, 0x200, addrProcessRow); + }else{ + sqlite3VdbeAddOp(v, OP_Ne, 0x200, addrGroupByChange); + } + sqlite3VdbeChangeP3(v, -1, (void*)pKeyInfo->aColl[j], P3_COLLSEQ); + } + + /* Generate code that runs whenever the GROUP BY changes. + ** Change in the GROUP BY are detected by the previous code + ** block. If there were no changes, this block is skipped. + ** + ** This code copies current group by terms in b0,b1,b2,... + ** over to a0,a1,a2. It then calls the output subroutine + ** and resets the aggregate accumulator registers in preparation + ** for the next GROUP BY batch. + */ + sqlite3VdbeResolveLabel(v, addrGroupByChange); + for(j=0; jnExpr; j++){ + sqlite3VdbeAddOp(v, OP_MemMove, iAMem+j, iBMem+j); + } + sqlite3VdbeAddOp(v, OP_Gosub, 0, addrOutputRow); + VdbeComment((v, "# output one row")); + sqlite3VdbeAddOp(v, OP_IfMemPos, iAbortFlag, addrEnd); + VdbeComment((v, "# check abort flag")); + sqlite3VdbeAddOp(v, OP_Gosub, 0, addrReset); + VdbeComment((v, "# reset accumulator")); + + /* Update the aggregate accumulators based on the content of + ** the current row + */ + sqlite3VdbeResolveLabel(v, addrProcessRow); + updateAccumulator(pParse, &sAggInfo); + sqlite3VdbeAddOp(v, OP_MemInt, 1, iUseFlag); + VdbeComment((v, "# indicate data in accumulator")); + + /* End of the loop + */ + if( groupBySort ){ + sqlite3VdbeAddOp(v, OP_Next, sAggInfo.sortingIdx, addrTopOfLoop); + }else{ + sqlite3WhereEnd(pWInfo); + sqlite3VdbeChangeToNoop(v, addrSortingIdx, 1); + } + + /* Output the final row of result + */ + sqlite3VdbeAddOp(v, OP_Gosub, 0, addrOutputRow); + VdbeComment((v, "# output final row")); + + } /* endif pGroupBy */ + else { + /* This case runs if the aggregate has no GROUP BY clause. The + ** processing is much simpler since there is only a single row + ** of output. 
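The shape of that simpler case, as a sketch (table t7 is invented; not part of the patch): with no GROUP BY there is a single scan and exactly one output row, even over an empty table.

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t7(x);", 0, 0, 0);

  /* Even with no rows in t7 the aggregate produces a single result row:
  ** count(*) is 0 and max(x) is NULL. */
  sqlite3_prepare(db, "SELECT count(*), max(x) FROM t7", -1, &pStmt, 0);
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("count=%d, max is %s\n", sqlite3_column_int(pStmt, 0),
           sqlite3_column_type(pStmt, 1)==SQLITE_NULL ? "NULL" : "non-NULL");
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}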
+ */ + resetAccumulator(pParse, &sAggInfo); + pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0); + if( pWInfo==0 ) goto select_end; + updateAccumulator(pParse, &sAggInfo); + sqlite3WhereEnd(pWInfo); + finalizeAggFunctions(pParse, &sAggInfo); + pOrderBy = 0; + if( pHaving ){ + sqlite3ExprIfFalse(pParse, pHaving, addrEnd, 1); + } + selectInnerLoop(pParse, p, p->pEList, 0, 0, 0, -1, + eDest, iParm, addrEnd, addrEnd, aff); + } + sqlite3VdbeResolveLabel(v, addrEnd); + + } /* endif aggregate query */ + + /* If there is an ORDER BY clause, then we need to sort the results + ** and send them to the callback one by one. + */ + if( pOrderBy ){ + generateSortTail(pParse, p, v, pEList->nExpr, eDest, iParm); + } + +#ifndef SQLITE_OMIT_SUBQUERY + /* If this was a subquery, we have now converted the subquery into a + ** temporary table. So set the SrcList_item.isPopulated flag to prevent + ** this subquery from being evaluated again and to force the use of + ** the temporary table. + */ + if( pParent ){ + assert( pParent->pSrc->nSrc>parentTab ); + assert( pParent->pSrc->a[parentTab].pSelect==p ); + pParent->pSrc->a[parentTab].isPopulated = 1; + } +#endif + + /* Jump here to skip this query + */ + sqlite3VdbeResolveLabel(v, iEnd); + + /* The SELECT was successfully coded. Set the return code to 0 + ** to indicate no errors. + */ + rc = 0; + + /* Control jumps to here if an error is encountered above, or upon + ** successful coding of the SELECT. + */ +select_end: + + /* Identify column names if we will be using them in a callback. This + ** step is skipped if the output is going to some other destination. + */ + if( rc==SQLITE_OK && eDest==SRT_Callback ){ + generateColumnNames(pParse, pTabList, pEList); + } + + sqlite3_free(sAggInfo.aCol); + sqlite3_free(sAggInfo.aFunc); + return rc; +} + +#if defined(SQLITE_DEBUG) +/* +******************************************************************************* +** The following code is used for testing and debugging only. The code +** that follows does not appear in normal builds. +** +** These routines are used to print out the content of all or part of a +** parse structures such as Select or Expr. Such printouts are useful +** for helping to understand what is happening inside the code generator +** during the execution of complex SELECT statements. +** +** These routine are not called anywhere from within the normal +** code base. Then are intended to be called from within the debugger +** or from temporary "printf" statements inserted for debugging. 
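For example, while chasing a code-generator problem one might temporarily paste a fragment like the following near the top of sqlite3Select() in a debug build; it uses sqlite3PrintSelect() defined just below and is purely illustrative, not part of the patch.

#ifdef SQLITE_DEBUG
  if( p ){
    sqlite3DebugPrintf("sqlite3Select() invoked on:\n");
    sqlite3PrintSelect(p, 2);   /* dump the parse tree of this SELECT */
  }
#endif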
+*/ +void sqlite3PrintExpr(Expr *p){ + if( p->token.z && p->token.n>0 ){ + sqlite3DebugPrintf("(%.*s", p->token.n, p->token.z); + }else{ + sqlite3DebugPrintf("(%d", p->op); + } + if( p->pLeft ){ + sqlite3DebugPrintf(" "); + sqlite3PrintExpr(p->pLeft); + } + if( p->pRight ){ + sqlite3DebugPrintf(" "); + sqlite3PrintExpr(p->pRight); + } + sqlite3DebugPrintf(")"); +} +void sqlite3PrintExprList(ExprList *pList){ + int i; + for(i=0; inExpr; i++){ + sqlite3PrintExpr(pList->a[i].pExpr); + if( inExpr-1 ){ + sqlite3DebugPrintf(", "); + } + } +} +void sqlite3PrintSelect(Select *p, int indent){ + sqlite3DebugPrintf("%*sSELECT(%p) ", indent, "", p); + sqlite3PrintExprList(p->pEList); + sqlite3DebugPrintf("\n"); + if( p->pSrc ){ + char *zPrefix; + int i; + zPrefix = "FROM"; + for(i=0; ipSrc->nSrc; i++){ + struct SrcList_item *pItem = &p->pSrc->a[i]; + sqlite3DebugPrintf("%*s ", indent+6, zPrefix); + zPrefix = ""; + if( pItem->pSelect ){ + sqlite3DebugPrintf("(\n"); + sqlite3PrintSelect(pItem->pSelect, indent+10); + sqlite3DebugPrintf("%*s)", indent+8, ""); + }else if( pItem->zName ){ + sqlite3DebugPrintf("%s", pItem->zName); + } + if( pItem->pTab ){ + sqlite3DebugPrintf("(table: %s)", pItem->pTab->zName); + } + if( pItem->zAlias ){ + sqlite3DebugPrintf(" AS %s", pItem->zAlias); + } + if( ipSrc->nSrc-1 ){ + sqlite3DebugPrintf(","); + } + sqlite3DebugPrintf("\n"); + } + } + if( p->pWhere ){ + sqlite3DebugPrintf("%*s WHERE ", indent, ""); + sqlite3PrintExpr(p->pWhere); + sqlite3DebugPrintf("\n"); + } + if( p->pGroupBy ){ + sqlite3DebugPrintf("%*s GROUP BY ", indent, ""); + sqlite3PrintExprList(p->pGroupBy); + sqlite3DebugPrintf("\n"); + } + if( p->pHaving ){ + sqlite3DebugPrintf("%*s HAVING ", indent, ""); + sqlite3PrintExpr(p->pHaving); + sqlite3DebugPrintf("\n"); + } + if( p->pOrderBy ){ + sqlite3DebugPrintf("%*s ORDER BY ", indent, ""); + sqlite3PrintExprList(p->pOrderBy); + sqlite3DebugPrintf("\n"); + } +} +/* End of the structure debug printing code +*****************************************************************************/ +#endif /* defined(SQLITE_TEST) || defined(SQLITE_DEBUG) */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/shell.c b/libraries/sqlite/unix/sqlite-3.5.1/src/shell.c new file mode 100644 index 0000000000..bd5105e48c --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/shell.c @@ -0,0 +1,2019 @@ +/* +** 2001 September 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains code to implement the "sqlite" command line +** utility for accessing SQLite databases. 
+** +** $Id: shell.c,v 1.167 2007/09/07 01:12:32 drh Exp $ +*/ +#include +#include +#include +#include +#include "sqlite3.h" +#include +#include + +#if !defined(_WIN32) && !defined(WIN32) && !defined(__MACOS__) && !defined(__OS2__) +# include +# include +# include +# include +#endif + +#ifdef __MACOS__ +# include +# include +# include +# include +# include +# include +#endif + +#ifdef __OS2__ +# include +#endif + +#if defined(HAVE_READLINE) && HAVE_READLINE==1 +# include +# include +#else +# define readline(p) local_getline(p,stdin) +# define add_history(X) +# define read_history(X) +# define write_history(X) +# define stifle_history(X) +#endif + +#if defined(_WIN32) || defined(WIN32) +# include +#else +/* Make sure isatty() has a prototype. +*/ +extern int isatty(); +#endif + +/* +** If the following flag is set, then command execution stops +** at an error if we are not interactive. +*/ +static int bail_on_error = 0; + +/* +** Threat stdin as an interactive input if the following variable +** is true. Otherwise, assume stdin is connected to a file or pipe. +*/ +static int stdin_is_interactive = 1; + +/* +** The following is the open SQLite database. We make a pointer +** to this database a static variable so that it can be accessed +** by the SIGINT handler to interrupt database processing. +*/ +static sqlite3 *db = 0; + +/* +** True if an interrupt (Control-C) has been received. +*/ +static volatile int seenInterrupt = 0; + +/* +** This is the name of our program. It is set in main(), used +** in a number of other places, mostly for error messages. +*/ +static char *Argv0; + +/* +** Prompt strings. Initialized in main. Settable with +** .prompt main continue +*/ +static char mainPrompt[20]; /* First line prompt. default: "sqlite> "*/ +static char continuePrompt[20]; /* Continuation prompt. default: " ...> " */ + +/* +** Write I/O traces to the following stream. +*/ +#ifdef SQLITE_ENABLE_IOTRACE +static FILE *iotrace = 0; +#endif + +/* +** This routine works like printf in that its first argument is a +** format string and subsequent arguments are values to be substituted +** in place of % fields. The result of formatting this string +** is written to iotrace. +*/ +#ifdef SQLITE_ENABLE_IOTRACE +static void iotracePrintf(const char *zFormat, ...){ + va_list ap; + char *z; + if( iotrace==0 ) return; + va_start(ap, zFormat); + z = sqlite3_vmprintf(zFormat, ap); + va_end(ap); + fprintf(iotrace, "%s", z); + sqlite3_free(z); +} +#endif + + +/* +** Determines if a string is a number of not. +*/ +static int isNumber(const char *z, int *realnum){ + if( *z=='-' || *z=='+' ) z++; + if( !isdigit(*z) ){ + return 0; + } + z++; + if( realnum ) *realnum = 0; + while( isdigit(*z) ){ z++; } + if( *z=='.' ){ + z++; + if( !isdigit(*z) ) return 0; + while( isdigit(*z) ){ z++; } + if( realnum ) *realnum = 1; + } + if( *z=='e' || *z=='E' ){ + z++; + if( *z=='+' || *z=='-' ) z++; + if( !isdigit(*z) ) return 0; + while( isdigit(*z) ){ z++; } + if( realnum ) *realnum = 1; + } + return *z==0; +} + +/* +** A global char* and an SQL function to access its current value +** from within an SQL statement. This program used to use the +** sqlite_exec_printf() API to substitue a string into an SQL statement. +** The correct way to do this with sqlite3 is to use the bind API, but +** since the shell is built around the callback paradigm it would be a lot +** of work. Instead just use this hack, which is quite harmless. 
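For reference, the bind-API alternative the comment alludes to would look roughly like the sketch below (not part of the shell; the helper name and the zPattern parameter are invented). The pattern is handed to the statement as a bound parameter instead of being published through the zShellStatic global.

#include "sqlite3.h"

/* Count the tables whose tbl_name matches zPattern. */
static int count_matching_tables(sqlite3 *db, const char *zPattern){
  sqlite3_stmt *pStmt = 0;
  int n = -1;
  const char *zSql =
     "SELECT count(*) FROM sqlite_master WHERE type='table' AND tbl_name LIKE ?";
  if( sqlite3_prepare(db, zSql, -1, &pStmt, 0)==SQLITE_OK ){
    sqlite3_bind_text(pStmt, 1, zPattern, -1, SQLITE_TRANSIENT);
    if( sqlite3_step(pStmt)==SQLITE_ROW ){
      n = sqlite3_column_int(pStmt, 0);
    }
  }
  sqlite3_finalize(pStmt);
  return n;
}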
+*/ +static const char *zShellStatic = 0; +static void shellstaticFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + assert( 0==argc ); + assert( zShellStatic ); + sqlite3_result_text(context, zShellStatic, -1, SQLITE_STATIC); +} + + +/* +** This routine reads a line of text from FILE in, stores +** the text in memory obtained from malloc() and returns a pointer +** to the text. NULL is returned at end of file, or if malloc() +** fails. +** +** The interface is like "readline" but no command-line editing +** is done. +*/ +static char *local_getline(char *zPrompt, FILE *in){ + char *zLine; + int nLine; + int n; + int eol; + + if( zPrompt && *zPrompt ){ + printf("%s",zPrompt); + fflush(stdout); + } + nLine = 100; + zLine = malloc( nLine ); + if( zLine==0 ) return 0; + n = 0; + eol = 0; + while( !eol ){ + if( n+100>nLine ){ + nLine = nLine*2 + 100; + zLine = realloc(zLine, nLine); + if( zLine==0 ) return 0; + } + if( fgets(&zLine[n], nLine - n, in)==0 ){ + if( n==0 ){ + free(zLine); + return 0; + } + zLine[n] = 0; + eol = 1; + break; + } + while( zLine[n] ){ n++; } + if( n>0 && zLine[n-1]=='\n' ){ + n--; + zLine[n] = 0; + eol = 1; + } + } + zLine = realloc( zLine, n+1 ); + return zLine; +} + +/* +** Retrieve a single line of input text. +** +** zPrior is a string of prior text retrieved. If not the empty +** string, then issue a continuation prompt. +*/ +static char *one_input_line(const char *zPrior, FILE *in){ + char *zPrompt; + char *zResult; + if( in!=0 ){ + return local_getline(0, in); + } + if( zPrior && zPrior[0] ){ + zPrompt = continuePrompt; + }else{ + zPrompt = mainPrompt; + } + zResult = readline(zPrompt); +#if defined(HAVE_READLINE) && HAVE_READLINE==1 + if( zResult && *zResult ) add_history(zResult); +#endif + return zResult; +} + +struct previous_mode_data { + int valid; /* Is there legit data in here? */ + int mode; + int showHeader; + int colWidth[100]; +}; + +/* +** An pointer to an instance of this structure is passed from +** the main program to the callback. This is used to communicate +** state and mode information. +*/ +struct callback_data { + sqlite3 *db; /* The database */ + int echoOn; /* True to echo input commands */ + int cnt; /* Number of records displayed so far */ + FILE *out; /* Write results here */ + int mode; /* An output mode setting */ + int writableSchema; /* True if PRAGMA writable_schema=ON */ + int showHeader; /* True to show column names in List or Column mode */ + char *zDestTable; /* Name of destination table when MODE_Insert */ + char separator[20]; /* Separator character for MODE_List */ + int colWidth[100]; /* Requested width of each column when in column mode*/ + int actualWidth[100]; /* Actual width of each column */ + char nullvalue[20]; /* The text to print when a NULL comes back from + ** the database */ + struct previous_mode_data explainPrev; + /* Holds the mode information just before + ** .explain ON */ + char outfile[FILENAME_MAX]; /* Filename for *out */ + const char *zDbFilename; /* name of the database file */ +}; + +/* +** These are the allowed modes. +*/ +#define MODE_Line 0 /* One column per line. 
Blank line between records */ +#define MODE_Column 1 /* One record per line in neat columns */ +#define MODE_List 2 /* One record per line with a separator */ +#define MODE_Semi 3 /* Same as MODE_List but append ";" to each line */ +#define MODE_Html 4 /* Generate an XHTML table */ +#define MODE_Insert 5 /* Generate SQL "insert" statements */ +#define MODE_Tcl 6 /* Generate ANSI-C or TCL quoted elements */ +#define MODE_Csv 7 /* Quote strings, numbers are plain */ +#define MODE_NUM_OF 8 /* The number of modes (not a mode itself) */ + +static const char *modeDescr[MODE_NUM_OF] = { + "line", + "column", + "list", + "semi", + "html", + "insert", + "tcl", + "csv", +}; + +/* +** Number of elements in an array +*/ +#define ArraySize(X) (sizeof(X)/sizeof(X[0])) + +/* +** Output the given string as a quoted string using SQL quoting conventions. +*/ +static void output_quoted_string(FILE *out, const char *z){ + int i; + int nSingle = 0; + for(i=0; z[i]; i++){ + if( z[i]=='\'' ) nSingle++; + } + if( nSingle==0 ){ + fprintf(out,"'%s'",z); + }else{ + fprintf(out,"'"); + while( *z ){ + for(i=0; z[i] && z[i]!='\''; i++){} + if( i==0 ){ + fprintf(out,"''"); + z++; + }else if( z[i]=='\'' ){ + fprintf(out,"%.*s''",i,z); + z += i+1; + }else{ + fprintf(out,"%s",z); + break; + } + } + fprintf(out,"'"); + } +} + +/* +** Output the given string as a quoted according to C or TCL quoting rules. +*/ +static void output_c_string(FILE *out, const char *z){ + unsigned int c; + fputc('"', out); + while( (c = *(z++))!=0 ){ + if( c=='\\' ){ + fputc(c, out); + fputc(c, out); + }else if( c=='\t' ){ + fputc('\\', out); + fputc('t', out); + }else if( c=='\n' ){ + fputc('\\', out); + fputc('n', out); + }else if( c=='\r' ){ + fputc('\\', out); + fputc('r', out); + }else if( !isprint(c) ){ + fprintf(out, "\\%03o", c&0xff); + }else{ + fputc(c, out); + } + } + fputc('"', out); +} + +/* +** Output the given string with characters that are special to +** HTML escaped. +*/ +static void output_html_string(FILE *out, const char *z){ + int i; + while( *z ){ + for(i=0; z[i] && z[i]!='<' && z[i]!='&'; i++){} + if( i>0 ){ + fprintf(out,"%.*s",i,z); + } + if( z[i]=='<' ){ + fprintf(out,"<"); + }else if( z[i]=='&' ){ + fprintf(out,"&"); + }else{ + break; + } + z += i + 1; + } +} + +/* +** If a field contains any character identified by a 1 in the following +** array, then the string must be quoted for CSV. +*/ +static const char needCsvQuote[] = { + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +}; + +/* +** Output a single term of CSV. Actually, p->separator is used for +** the separator, which may or may not be a comma. p->nullvalue is +** the null value. Strings are quoted using ANSI-C rules. Numbers +** appear outside of quotes. 
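The quoting convention implemented by output_quoted_string() above (wrap the value in single quotes and double any embedded single quote) is the same one SQLite's built-in quote() SQL function uses, which the .dump code further down leans on when it rebuilds INSERT statements. A quick way to see it (a sketch, not part of the shell):

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt = 0;
  sqlite3_open(":memory:", &db);
  /* The value is the 9-character string  it's here ; quote() wraps it in
  ** single quotes and doubles the embedded quote, printing  'it''s here'  */
  sqlite3_prepare(db, "SELECT quote('it''s here')", -1, &pStmt, 0);
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%s\n", (const char*)sqlite3_column_text(pStmt, 0));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}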
+*/ +static void output_csv(struct callback_data *p, const char *z, int bSep){ + FILE *out = p->out; + if( z==0 ){ + fprintf(out,"%s",p->nullvalue); + }else{ + int i; + for(i=0; z[i]; i++){ + if( needCsvQuote[((unsigned char*)z)[i]] ){ + i = 0; + break; + } + } + if( i==0 ){ + putc('"', out); + for(i=0; z[i]; i++){ + if( z[i]=='"' ) putc('"', out); + putc(z[i], out); + } + putc('"', out); + }else{ + fprintf(out, "%s", z); + } + } + if( bSep ){ + fprintf(p->out, p->separator); + } +} + +#ifdef SIGINT +/* +** This routine runs when the user presses Ctrl-C +*/ +static void interrupt_handler(int NotUsed){ + seenInterrupt = 1; + if( db ) sqlite3_interrupt(db); +} +#endif + +/* +** This is the callback routine that the SQLite library +** invokes for each row of a query result. +*/ +static int callback(void *pArg, int nArg, char **azArg, char **azCol){ + int i; + struct callback_data *p = (struct callback_data*)pArg; + switch( p->mode ){ + case MODE_Line: { + int w = 5; + if( azArg==0 ) break; + for(i=0; iw ) w = len; + } + if( p->cnt++>0 ) fprintf(p->out,"\n"); + for(i=0; iout,"%*s = %s\n", w, azCol[i], + azArg[i] ? azArg[i] : p->nullvalue); + } + break; + } + case MODE_Column: { + if( p->cnt++==0 ){ + for(i=0; icolWidth) ){ + w = p->colWidth[i]; + }else{ + w = 0; + } + if( w<=0 ){ + w = strlen(azCol[i] ? azCol[i] : ""); + if( w<10 ) w = 10; + n = strlen(azArg && azArg[i] ? azArg[i] : p->nullvalue); + if( wactualWidth) ){ + p->actualWidth[i] = w; + } + if( p->showHeader ){ + fprintf(p->out,"%-*.*s%s",w,w,azCol[i], i==nArg-1 ? "\n": " "); + } + } + if( p->showHeader ){ + for(i=0; iactualWidth) ){ + w = p->actualWidth[i]; + }else{ + w = 10; + } + fprintf(p->out,"%-*.*s%s",w,w,"-----------------------------------" + "----------------------------------------------------------", + i==nArg-1 ? "\n": " "); + } + } + } + if( azArg==0 ) break; + for(i=0; iactualWidth) ){ + w = p->actualWidth[i]; + }else{ + w = 10; + } + fprintf(p->out,"%-*.*s%s",w,w, + azArg[i] ? azArg[i] : p->nullvalue, i==nArg-1 ? "\n": " "); + } + break; + } + case MODE_Semi: + case MODE_List: { + if( p->cnt++==0 && p->showHeader ){ + for(i=0; iout,"%s%s",azCol[i], i==nArg-1 ? "\n" : p->separator); + } + } + if( azArg==0 ) break; + for(i=0; inullvalue; + fprintf(p->out, "%s", z); + if( iout, "%s", p->separator); + }else if( p->mode==MODE_Semi ){ + fprintf(p->out, ";\n"); + }else{ + fprintf(p->out, "\n"); + } + } + break; + } + case MODE_Html: { + if( p->cnt++==0 && p->showHeader ){ + fprintf(p->out,""); + for(i=0; iout,"",azCol[i]); + } + fprintf(p->out,"\n"); + } + if( azArg==0 ) break; + fprintf(p->out,""); + for(i=0; iout,"\n"); + } + fprintf(p->out,"\n"); + break; + } + case MODE_Tcl: { + if( p->cnt++==0 && p->showHeader ){ + for(i=0; iout,azCol[i] ? azCol[i] : ""); + fprintf(p->out, "%s", p->separator); + } + fprintf(p->out,"\n"); + } + if( azArg==0 ) break; + for(i=0; iout, azArg[i] ? azArg[i] : p->nullvalue); + fprintf(p->out, "%s", p->separator); + } + fprintf(p->out,"\n"); + break; + } + case MODE_Csv: { + if( p->cnt++==0 && p->showHeader ){ + for(i=0; iout,"\n"); + } + if( azArg==0 ) break; + for(i=0; iout,"\n"); + break; + } + case MODE_Insert: { + if( azArg==0 ) break; + fprintf(p->out,"INSERT INTO %s VALUES(",p->zDestTable); + for(i=0; i0 ? 
",": ""; + if( azArg[i]==0 ){ + fprintf(p->out,"%sNULL",zSep); + }else if( isNumber(azArg[i], 0) ){ + fprintf(p->out,"%s%s",zSep, azArg[i]); + }else{ + if( zSep[0] ) fprintf(p->out,"%s",zSep); + output_quoted_string(p->out, azArg[i]); + } + } + fprintf(p->out,");\n"); + break; + } + } + return 0; +} + +/* +** Set the destination table field of the callback_data structure to +** the name of the table given. Escape any quote characters in the +** table name. +*/ +static void set_table_name(struct callback_data *p, const char *zName){ + int i, n; + int needQuote; + char *z; + + if( p->zDestTable ){ + free(p->zDestTable); + p->zDestTable = 0; + } + if( zName==0 ) return; + needQuote = !isalpha((unsigned char)*zName) && *zName!='_'; + for(i=n=0; zName[i]; i++, n++){ + if( !isalnum((unsigned char)zName[i]) && zName[i]!='_' ){ + needQuote = 1; + if( zName[i]=='\'' ) n++; + } + } + if( needQuote ) n += 2; + z = p->zDestTable = malloc( n+1 ); + if( z==0 ){ + fprintf(stderr,"Out of memory!\n"); + exit(1); + } + n = 0; + if( needQuote ) z[n++] = '\''; + for(i=0; zName[i]; i++){ + z[n++] = zName[i]; + if( zName[i]=='\'' ) z[n++] = '\''; + } + if( needQuote ) z[n++] = '\''; + z[n] = 0; +} + +/* zIn is either a pointer to a NULL-terminated string in memory obtained +** from malloc(), or a NULL pointer. The string pointed to by zAppend is +** added to zIn, and the result returned in memory obtained from malloc(). +** zIn, if it was not NULL, is freed. +** +** If the third argument, quote, is not '\0', then it is used as a +** quote character for zAppend. +*/ +static char *appendText(char *zIn, char const *zAppend, char quote){ + int len; + int i; + int nAppend = strlen(zAppend); + int nIn = (zIn?strlen(zIn):0); + + len = nAppend+nIn+1; + if( quote ){ + len += 2; + for(i=0; iout, "DELETE FROM sqlite_sequence;\n"); + }else if( strcmp(zTable, "sqlite_stat1")==0 ){ + fprintf(p->out, "ANALYZE sqlite_master;\n"); + }else if( strncmp(zTable, "sqlite_", 7)==0 ){ + return 0; + }else if( strncmp(zSql, "CREATE VIRTUAL TABLE", 20)==0 ){ + char *zIns; + if( !p->writableSchema ){ + fprintf(p->out, "PRAGMA writable_schema=ON;\n"); + p->writableSchema = 1; + } + zIns = sqlite3_mprintf( + "INSERT INTO sqlite_master(type,name,tbl_name,rootpage,sql)" + "VALUES('table','%q','%q',0,'%q');", + zTable, zTable, zSql); + fprintf(p->out, "%s\n", zIns); + sqlite3_free(zIns); + return 0; + }else{ + fprintf(p->out, "%s;\n", zSql); + } + + if( strcmp(zType, "table")==0 ){ + sqlite3_stmt *pTableInfo = 0; + char *zSelect = 0; + char *zTableInfo = 0; + char *zTmp = 0; + + zTableInfo = appendText(zTableInfo, "PRAGMA table_info(", 0); + zTableInfo = appendText(zTableInfo, zTable, '"'); + zTableInfo = appendText(zTableInfo, ");", 0); + + rc = sqlite3_prepare(p->db, zTableInfo, -1, &pTableInfo, 0); + if( zTableInfo ) free(zTableInfo); + if( rc!=SQLITE_OK || !pTableInfo ){ + return 1; + } + + zSelect = appendText(zSelect, "SELECT 'INSERT INTO ' || ", 0); + zTmp = appendText(zTmp, zTable, '"'); + if( zTmp ){ + zSelect = appendText(zSelect, zTmp, '\''); + } + zSelect = appendText(zSelect, " || ' VALUES(' || ", 0); + rc = sqlite3_step(pTableInfo); + while( rc==SQLITE_ROW ){ + const char *zText = (const char *)sqlite3_column_text(pTableInfo, 1); + zSelect = appendText(zSelect, "quote(", 0); + zSelect = appendText(zSelect, zText, '"'); + rc = sqlite3_step(pTableInfo); + if( rc==SQLITE_ROW ){ + zSelect = appendText(zSelect, ") || ',' || ", 0); + }else{ + zSelect = appendText(zSelect, ") ", 0); + } + } + rc = sqlite3_finalize(pTableInfo); + if( 
rc!=SQLITE_OK ){ + if( zSelect ) free(zSelect); + return 1; + } + zSelect = appendText(zSelect, "|| ')' FROM ", 0); + zSelect = appendText(zSelect, zTable, '"'); + + rc = run_table_dump_query(p->out, p->db, zSelect); + if( rc==SQLITE_CORRUPT ){ + zSelect = appendText(zSelect, " ORDER BY rowid DESC", 0); + rc = run_table_dump_query(p->out, p->db, zSelect); + } + if( zSelect ) free(zSelect); + } + return 0; +} + +/* +** Run zQuery. Use dump_callback() as the callback routine so that +** the contents of the query are output as SQL statements. +** +** If we get a SQLITE_CORRUPT error, rerun the query after appending +** "ORDER BY rowid DESC" to the end. +*/ +static int run_schema_dump_query( + struct callback_data *p, + const char *zQuery, + char **pzErrMsg +){ + int rc; + rc = sqlite3_exec(p->db, zQuery, dump_callback, p, pzErrMsg); + if( rc==SQLITE_CORRUPT ){ + char *zQ2; + int len = strlen(zQuery); + if( pzErrMsg ) sqlite3_free(*pzErrMsg); + zQ2 = malloc( len+100 ); + if( zQ2==0 ) return rc; + sqlite3_snprintf(sizeof(zQ2), zQ2, "%s ORDER BY rowid DESC", zQuery); + rc = sqlite3_exec(p->db, zQ2, dump_callback, p, pzErrMsg); + free(zQ2); + } + return rc; +} + +/* +** Text of a help message +*/ +static char zHelp[] = + ".bail ON|OFF Stop after hitting an error. Default OFF\n" + ".databases List names and files of attached databases\n" + ".dump ?TABLE? ... Dump the database in an SQL text format\n" + ".echo ON|OFF Turn command echo on or off\n" + ".exit Exit this program\n" + ".explain ON|OFF Turn output mode suitable for EXPLAIN on or off.\n" + ".header(s) ON|OFF Turn display of headers on or off\n" + ".help Show this message\n" + ".import FILE TABLE Import data from FILE into TABLE\n" + ".indices TABLE Show names of all indices on TABLE\n" +#ifdef SQLITE_ENABLE_IOTRACE + ".iotrace FILE Enable I/O diagnostic logging to FILE\n" +#endif +#ifndef SQLITE_OMIT_LOAD_EXTENSION + ".load FILE ?ENTRY? Load an extension library\n" +#endif + ".mode MODE ?TABLE? Set output mode where MODE is one of:\n" + " csv Comma-separated values\n" + " column Left-aligned columns. (See .width)\n" + " html HTML
<table>
code\n" + " insert SQL insert statements for TABLE\n" + " line One value per line\n" + " list Values delimited by .separator string\n" + " tabs Tab-separated values\n" + " tcl TCL list elements\n" + ".nullvalue STRING Print STRING in place of NULL values\n" + ".output FILENAME Send output to FILENAME\n" + ".output stdout Send output to the screen\n" + ".prompt MAIN CONTINUE Replace the standard prompts\n" + ".quit Exit this program\n" + ".read FILENAME Execute SQL in FILENAME\n" + ".schema ?TABLE? Show the CREATE statements\n" + ".separator STRING Change separator used by output mode and .import\n" + ".show Show the current values for various settings\n" + ".tables ?PATTERN? List names of tables matching a LIKE pattern\n" + ".timeout MS Try opening locked tables for MS milliseconds\n" + ".width NUM NUM ... Set column widths for \"column\" mode\n" +; + +/* Forward reference */ +static int process_input(struct callback_data *p, FILE *in); + +/* +** Make sure the database is open. If it is not, then open it. If +** the database fails to open, print an error message and exit. +*/ +static void open_db(struct callback_data *p){ + if( p->db==0 ){ + sqlite3_open(p->zDbFilename, &p->db); + db = p->db; + sqlite3_create_function(db, "shellstatic", 0, SQLITE_UTF8, 0, + shellstaticFunc, 0, 0); + if( SQLITE_OK!=sqlite3_errcode(db) ){ + fprintf(stderr,"Unable to open database \"%s\": %s\n", + p->zDbFilename, sqlite3_errmsg(db)); + exit(1); + } +#ifndef SQLITE_OMIT_LOAD_EXTENSION + sqlite3_enable_load_extension(p->db, 1); +#endif + } +} + +/* +** Do C-language style dequoting. +** +** \t -> tab +** \n -> newline +** \r -> carriage return +** \NNN -> ascii character NNN in octal +** \\ -> backslash +*/ +static void resolve_backslashes(char *z){ + int i, j, c; + for(i=j=0; (c = z[i])!=0; i++, j++){ + if( c=='\\' ){ + c = z[++i]; + if( c=='n' ){ + c = '\n'; + }else if( c=='t' ){ + c = '\t'; + }else if( c=='r' ){ + c = '\r'; + }else if( c>='0' && c<='7' ){ + c -= '0'; + if( z[i+1]>='0' && z[i+1]<='7' ){ + i++; + c = (c<<3) + z[i] - '0'; + if( z[i+1]>='0' && z[i+1]<='7' ){ + i++; + c = (c<<3) + z[i] - '0'; + } + } + } + } + z[j] = c; + } + z[j] = 0; +} + +/* +** Interpret zArg as a boolean value. Return either 0 or 1. +*/ +static int booleanValue(char *zArg){ + int val = atoi(zArg); + int j; + for(j=0; zArg[j]; j++){ + zArg[j] = tolower(zArg[j]); + } + if( strcmp(zArg,"on")==0 ){ + val = 1; + }else if( strcmp(zArg,"yes")==0 ){ + val = 1; + } + return val; +} + +/* +** If an input line begins with "." then invoke this routine to +** process that line. +** +** Return 1 on error, 2 to exit, and 0 otherwise. +*/ +static int do_meta_command(char *zLine, struct callback_data *p){ + int i = 1; + int nArg = 0; + int n, c; + int rc = 0; + char *azArg[50]; + + /* Parse the input line into tokens. 
+ */ + while( zLine[i] && nArg1 && strncmp(azArg[0], "bail", n)==0 && nArg>1 ){ + bail_on_error = booleanValue(azArg[1]); + }else + + if( c=='d' && n>1 && strncmp(azArg[0], "databases", n)==0 ){ + struct callback_data data; + char *zErrMsg = 0; + open_db(p); + memcpy(&data, p, sizeof(data)); + data.showHeader = 1; + data.mode = MODE_Column; + data.colWidth[0] = 3; + data.colWidth[1] = 15; + data.colWidth[2] = 58; + data.cnt = 0; + sqlite3_exec(p->db, "PRAGMA database_list; ", callback, &data, &zErrMsg); + if( zErrMsg ){ + fprintf(stderr,"Error: %s\n", zErrMsg); + sqlite3_free(zErrMsg); + } + }else + + if( c=='d' && strncmp(azArg[0], "dump", n)==0 ){ + char *zErrMsg = 0; + open_db(p); + fprintf(p->out, "BEGIN TRANSACTION;\n"); + p->writableSchema = 0; + if( nArg==1 ){ + run_schema_dump_query(p, + "SELECT name, type, sql FROM sqlite_master " + "WHERE sql NOT NULL AND type=='table'", 0 + ); + run_table_dump_query(p->out, p->db, + "SELECT sql FROM sqlite_master " + "WHERE sql NOT NULL AND type IN ('index','trigger','view')" + ); + }else{ + int i; + for(i=1; iout, p->db, + "SELECT sql FROM sqlite_master " + "WHERE sql NOT NULL" + " AND type IN ('index','trigger','view')" + " AND tbl_name LIKE shellstatic()" + ); + zShellStatic = 0; + } + } + if( p->writableSchema ){ + fprintf(p->out, "PRAGMA writable_schema=OFF;\n"); + p->writableSchema = 0; + } + if( zErrMsg ){ + fprintf(stderr,"Error: %s\n", zErrMsg); + sqlite3_free(zErrMsg); + }else{ + fprintf(p->out, "COMMIT;\n"); + } + }else + + if( c=='e' && strncmp(azArg[0], "echo", n)==0 && nArg>1 ){ + p->echoOn = booleanValue(azArg[1]); + }else + + if( c=='e' && strncmp(azArg[0], "exit", n)==0 ){ + rc = 2; + }else + + if( c=='e' && strncmp(azArg[0], "explain", n)==0 ){ + int val = nArg>=2 ? booleanValue(azArg[1]) : 1; + if(val == 1) { + if(!p->explainPrev.valid) { + p->explainPrev.valid = 1; + p->explainPrev.mode = p->mode; + p->explainPrev.showHeader = p->showHeader; + memcpy(p->explainPrev.colWidth,p->colWidth,sizeof(p->colWidth)); + } + /* We could put this code under the !p->explainValid + ** condition so that it does not execute if we are already in + ** explain mode. However, always executing it allows us an easy + ** was to reset to explain mode in case the user previously + ** did an .explain followed by a .width, .mode or .header + ** command. 
+ */ + p->mode = MODE_Column; + p->showHeader = 1; + memset(p->colWidth,0,ArraySize(p->colWidth)); + p->colWidth[0] = 4; + p->colWidth[1] = 14; + p->colWidth[2] = 10; + p->colWidth[3] = 10; + p->colWidth[4] = 33; + }else if (p->explainPrev.valid) { + p->explainPrev.valid = 0; + p->mode = p->explainPrev.mode; + p->showHeader = p->explainPrev.showHeader; + memcpy(p->colWidth,p->explainPrev.colWidth,sizeof(p->colWidth)); + } + }else + + if( c=='h' && (strncmp(azArg[0], "header", n)==0 || + strncmp(azArg[0], "headers", n)==0 )&& nArg>1 ){ + p->showHeader = booleanValue(azArg[1]); + }else + + if( c=='h' && strncmp(azArg[0], "help", n)==0 ){ + fprintf(stderr,zHelp); + }else + + if( c=='i' && strncmp(azArg[0], "import", n)==0 && nArg>=3 ){ + char *zTable = azArg[2]; /* Insert data into this table */ + char *zFile = azArg[1]; /* The file from which to extract data */ + sqlite3_stmt *pStmt; /* A statement */ + int rc; /* Result code */ + int nCol; /* Number of columns in the table */ + int nByte; /* Number of bytes in an SQL string */ + int i, j; /* Loop counters */ + int nSep; /* Number of bytes in p->separator[] */ + char *zSql; /* An SQL statement */ + char *zLine; /* A single line of input from the file */ + char **azCol; /* zLine[] broken up into columns */ + char *zCommit; /* How to commit changes */ + FILE *in; /* The input file */ + int lineno = 0; /* Line number of input file */ + + open_db(p); + nSep = strlen(p->separator); + if( nSep==0 ){ + fprintf(stderr, "non-null separator required for import\n"); + return 0; + } + zSql = sqlite3_mprintf("SELECT * FROM '%q'", zTable); + if( zSql==0 ) return 0; + nByte = strlen(zSql); + rc = sqlite3_prepare(p->db, zSql, -1, &pStmt, 0); + sqlite3_free(zSql); + if( rc ){ + fprintf(stderr,"Error: %s\n", sqlite3_errmsg(db)); + nCol = 0; + rc = 1; + }else{ + nCol = sqlite3_column_count(pStmt); + } + sqlite3_finalize(pStmt); + if( nCol==0 ) return 0; + zSql = malloc( nByte + 20 + nCol*2 ); + if( zSql==0 ) return 0; + sqlite3_snprintf(nByte+20, zSql, "INSERT INTO '%q' VALUES(?", zTable); + j = strlen(zSql); + for(i=1; idb, zSql, -1, &pStmt, 0); + free(zSql); + if( rc ){ + fprintf(stderr, "Error: %s\n", sqlite3_errmsg(db)); + sqlite3_finalize(pStmt); + return 1; + } + in = fopen(zFile, "rb"); + if( in==0 ){ + fprintf(stderr, "cannot open file: %s\n", zFile); + sqlite3_finalize(pStmt); + return 0; + } + azCol = malloc( sizeof(azCol[0])*(nCol+1) ); + if( azCol==0 ){ + fclose(in); + return 0; + } + sqlite3_exec(p->db, "BEGIN", 0, 0, 0); + zCommit = "COMMIT"; + while( (zLine = local_getline(0, in))!=0 ){ + char *z; + i = 0; + lineno++; + azCol[0] = zLine; + for(i=0, z=zLine; *z && *z!='\n' && *z!='\r'; z++){ + if( *z==p->separator[0] && strncmp(z, p->separator, nSep)==0 ){ + *z = 0; + i++; + if( idb, zCommit, 0, 0, 0); + }else + + if( c=='i' && strncmp(azArg[0], "indices", n)==0 && nArg>1 ){ + struct callback_data data; + char *zErrMsg = 0; + open_db(p); + memcpy(&data, p, sizeof(data)); + data.showHeader = 0; + data.mode = MODE_List; + zShellStatic = azArg[1]; + sqlite3_exec(p->db, + "SELECT name FROM sqlite_master " + "WHERE type='index' AND tbl_name LIKE shellstatic() " + "UNION ALL " + "SELECT name FROM sqlite_temp_master " + "WHERE type='index' AND tbl_name LIKE shellstatic() " + "ORDER BY 1", + callback, &data, &zErrMsg + ); + zShellStatic = 0; + if( zErrMsg ){ + fprintf(stderr,"Error: %s\n", zErrMsg); + sqlite3_free(zErrMsg); + } + }else + +#ifdef SQLITE_ENABLE_IOTRACE + if( c=='i' && strncmp(azArg[0], "iotrace", n)==0 ){ + extern void 
(*sqlite3_io_trace)(const char*, ...); + if( iotrace && iotrace!=stdout ) fclose(iotrace); + iotrace = 0; + if( nArg<2 ){ + sqlite3_io_trace = 0; + }else if( strcmp(azArg[1], "-")==0 ){ + sqlite3_io_trace = iotracePrintf; + iotrace = stdout; + }else{ + iotrace = fopen(azArg[1], "w"); + if( iotrace==0 ){ + fprintf(stderr, "cannot open \"%s\"\n", azArg[1]); + sqlite3_io_trace = 0; + }else{ + sqlite3_io_trace = iotracePrintf; + } + } + }else +#endif + +#ifndef SQLITE_OMIT_LOAD_EXTENSION + if( c=='l' && strncmp(azArg[0], "load", n)==0 && nArg>=2 ){ + const char *zFile, *zProc; + char *zErrMsg = 0; + int rc; + zFile = azArg[1]; + zProc = nArg>=3 ? azArg[2] : 0; + open_db(p); + rc = sqlite3_load_extension(p->db, zFile, zProc, &zErrMsg); + if( rc!=SQLITE_OK ){ + fprintf(stderr, "%s\n", zErrMsg); + sqlite3_free(zErrMsg); + rc = 1; + } + }else +#endif + + if( c=='m' && strncmp(azArg[0], "mode", n)==0 && nArg>=2 ){ + int n2 = strlen(azArg[1]); + if( strncmp(azArg[1],"line",n2)==0 + || + strncmp(azArg[1],"lines",n2)==0 ){ + p->mode = MODE_Line; + }else if( strncmp(azArg[1],"column",n2)==0 + || + strncmp(azArg[1],"columns",n2)==0 ){ + p->mode = MODE_Column; + }else if( strncmp(azArg[1],"list",n2)==0 ){ + p->mode = MODE_List; + }else if( strncmp(azArg[1],"html",n2)==0 ){ + p->mode = MODE_Html; + }else if( strncmp(azArg[1],"tcl",n2)==0 ){ + p->mode = MODE_Tcl; + }else if( strncmp(azArg[1],"csv",n2)==0 ){ + p->mode = MODE_Csv; + sqlite3_snprintf(sizeof(p->separator), p->separator, ","); + }else if( strncmp(azArg[1],"tabs",n2)==0 ){ + p->mode = MODE_List; + sqlite3_snprintf(sizeof(p->separator), p->separator, "\t"); + }else if( strncmp(azArg[1],"insert",n2)==0 ){ + p->mode = MODE_Insert; + if( nArg>=3 ){ + set_table_name(p, azArg[2]); + }else{ + set_table_name(p, "table"); + } + }else { + fprintf(stderr,"mode should be one of: " + "column csv html insert line list tabs tcl\n"); + } + }else + + if( c=='n' && strncmp(azArg[0], "nullvalue", n)==0 && nArg==2 ) { + sqlite3_snprintf(sizeof(p->nullvalue), p->nullvalue, + "%.*s", (int)ArraySize(p->nullvalue)-1, azArg[1]); + }else + + if( c=='o' && strncmp(azArg[0], "output", n)==0 && nArg==2 ){ + if( p->out!=stdout ){ + fclose(p->out); + } + if( strcmp(azArg[1],"stdout")==0 ){ + p->out = stdout; + sqlite3_snprintf(sizeof(p->outfile), p->outfile, "stdout"); + }else{ + p->out = fopen(azArg[1], "wb"); + if( p->out==0 ){ + fprintf(stderr,"can't write to \"%s\"\n", azArg[1]); + p->out = stdout; + } else { + sqlite3_snprintf(sizeof(p->outfile), p->outfile, "%s", azArg[1]); + } + } + }else + + if( c=='p' && strncmp(azArg[0], "prompt", n)==0 && (nArg==2 || nArg==3)){ + if( nArg >= 2) { + strncpy(mainPrompt,azArg[1],(int)ArraySize(mainPrompt)-1); + } + if( nArg >= 3) { + strncpy(continuePrompt,azArg[2],(int)ArraySize(continuePrompt)-1); + } + }else + + if( c=='q' && strncmp(azArg[0], "quit", n)==0 ){ + rc = 2; + }else + + if( c=='r' && strncmp(azArg[0], "read", n)==0 && nArg==2 ){ + FILE *alt = fopen(azArg[1], "rb"); + if( alt==0 ){ + fprintf(stderr,"can't open \"%s\"\n", azArg[1]); + }else{ + process_input(p, alt); + fclose(alt); + } + }else + + if( c=='s' && strncmp(azArg[0], "schema", n)==0 ){ + struct callback_data data; + char *zErrMsg = 0; + open_db(p); + memcpy(&data, p, sizeof(data)); + data.showHeader = 0; + data.mode = MODE_Semi; + if( nArg>1 ){ + int i; + for(i=0; azArg[1][i]; i++) azArg[1][i] = tolower(azArg[1][i]); + if( strcmp(azArg[1],"sqlite_master")==0 ){ + char *new_argv[2], *new_colv[2]; + new_argv[0] = "CREATE TABLE sqlite_master (\n" + " type text,\n" 
+ " name text,\n" + " tbl_name text,\n" + " rootpage integer,\n" + " sql text\n" + ")"; + new_argv[1] = 0; + new_colv[0] = "sql"; + new_colv[1] = 0; + callback(&data, 1, new_argv, new_colv); + }else if( strcmp(azArg[1],"sqlite_temp_master")==0 ){ + char *new_argv[2], *new_colv[2]; + new_argv[0] = "CREATE TEMP TABLE sqlite_temp_master (\n" + " type text,\n" + " name text,\n" + " tbl_name text,\n" + " rootpage integer,\n" + " sql text\n" + ")"; + new_argv[1] = 0; + new_colv[0] = "sql"; + new_colv[1] = 0; + callback(&data, 1, new_argv, new_colv); + }else{ + zShellStatic = azArg[1]; + sqlite3_exec(p->db, + "SELECT sql FROM " + " (SELECT * FROM sqlite_master UNION ALL" + " SELECT * FROM sqlite_temp_master) " + "WHERE tbl_name LIKE shellstatic() AND type!='meta' AND sql NOTNULL " + "ORDER BY substr(type,2,1), name", + callback, &data, &zErrMsg); + zShellStatic = 0; + } + }else{ + sqlite3_exec(p->db, + "SELECT sql FROM " + " (SELECT * FROM sqlite_master UNION ALL" + " SELECT * FROM sqlite_temp_master) " + "WHERE type!='meta' AND sql NOTNULL AND name NOT LIKE 'sqlite_%'" + "ORDER BY substr(type,2,1), name", + callback, &data, &zErrMsg + ); + } + if( zErrMsg ){ + fprintf(stderr,"Error: %s\n", zErrMsg); + sqlite3_free(zErrMsg); + } + }else + + if( c=='s' && strncmp(azArg[0], "separator", n)==0 && nArg==2 ){ + sqlite3_snprintf(sizeof(p->separator), p->separator, + "%.*s", (int)sizeof(p->separator)-1, azArg[1]); + }else + + if( c=='s' && strncmp(azArg[0], "show", n)==0){ + int i; + fprintf(p->out,"%9.9s: %s\n","echo", p->echoOn ? "on" : "off"); + fprintf(p->out,"%9.9s: %s\n","explain", p->explainPrev.valid ? "on" :"off"); + fprintf(p->out,"%9.9s: %s\n","headers", p->showHeader ? "on" : "off"); + fprintf(p->out,"%9.9s: %s\n","mode", modeDescr[p->mode]); + fprintf(p->out,"%9.9s: ", "nullvalue"); + output_c_string(p->out, p->nullvalue); + fprintf(p->out, "\n"); + fprintf(p->out,"%9.9s: %s\n","output", + strlen(p->outfile) ? 
p->outfile : "stdout"); + fprintf(p->out,"%9.9s: ", "separator"); + output_c_string(p->out, p->separator); + fprintf(p->out, "\n"); + fprintf(p->out,"%9.9s: ","width"); + for (i=0;i<(int)ArraySize(p->colWidth) && p->colWidth[i] != 0;i++) { + fprintf(p->out,"%d ",p->colWidth[i]); + } + fprintf(p->out,"\n"); + }else + + if( c=='t' && n>1 && strncmp(azArg[0], "tables", n)==0 ){ + char **azResult; + int nRow, rc; + char *zErrMsg; + open_db(p); + if( nArg==1 ){ + rc = sqlite3_get_table(p->db, + "SELECT name FROM sqlite_master " + "WHERE type IN ('table','view') AND name NOT LIKE 'sqlite_%'" + "UNION ALL " + "SELECT name FROM sqlite_temp_master " + "WHERE type IN ('table','view') " + "ORDER BY 1", + &azResult, &nRow, 0, &zErrMsg + ); + }else{ + zShellStatic = azArg[1]; + rc = sqlite3_get_table(p->db, + "SELECT name FROM sqlite_master " + "WHERE type IN ('table','view') AND name LIKE '%'||shellstatic()||'%' " + "UNION ALL " + "SELECT name FROM sqlite_temp_master " + "WHERE type IN ('table','view') AND name LIKE '%'||shellstatic()||'%' " + "ORDER BY 1", + &azResult, &nRow, 0, &zErrMsg + ); + zShellStatic = 0; + } + if( zErrMsg ){ + fprintf(stderr,"Error: %s\n", zErrMsg); + sqlite3_free(zErrMsg); + } + if( rc==SQLITE_OK ){ + int len, maxlen = 0; + int i, j; + int nPrintCol, nPrintRow; + for(i=1; i<=nRow; i++){ + if( azResult[i]==0 ) continue; + len = strlen(azResult[i]); + if( len>maxlen ) maxlen = len; + } + nPrintCol = 80/(maxlen+2); + if( nPrintCol<1 ) nPrintCol = 1; + nPrintRow = (nRow + nPrintCol - 1)/nPrintCol; + for(i=0; i1 && strncmp(azArg[0], "timeout", n)==0 && nArg>=2 ){ + open_db(p); + sqlite3_busy_timeout(p->db, atoi(azArg[1])); + }else + + if( c=='w' && strncmp(azArg[0], "width", n)==0 ){ + int j; + assert( nArg<=ArraySize(azArg) ); + for(j=1; jcolWidth); j++){ + p->colWidth[j-1] = atoi(azArg[j]); + } + }else + + { + fprintf(stderr, "unknown command or invalid arguments: " + " \"%s\". Enter \".help\" for help\n", azArg[0]); + } + + return rc; +} + +/* +** Return TRUE if a semicolon occurs anywhere in the first N characters +** of string z[]. +*/ +static int _contains_semicolon(const char *z, int N){ + int i; + for(i=0; iout); + free(zLine); + zLine = one_input_line(zSql, in); + if( zLine==0 ){ + break; /* We have reached EOF */ + } + if( seenInterrupt ){ + if( in!=0 ) break; + seenInterrupt = 0; + } + lineno++; + if( p->echoOn ) printf("%s\n", zLine); + if( (zSql==0 || zSql[0]==0) && _all_whitespace(zLine) ) continue; + if( zLine && zLine[0]=='.' 
&& nSql==0 ){ + rc = do_meta_command(zLine, p); + if( rc==2 ){ + break; + }else if( rc ){ + errCnt++; + } + continue; + } + if( _is_command_terminator(zLine) ){ + memcpy(zLine,";",2); + } + nSqlPrior = nSql; + if( zSql==0 ){ + int i; + for(i=0; zLine[i] && isspace((unsigned char)zLine[i]); i++){} + if( zLine[i]!=0 ){ + nSql = strlen(zLine); + zSql = malloc( nSql+1 ); + if( zSql==0 ){ + fprintf(stderr, "out of memory\n"); + exit(1); + } + memcpy(zSql, zLine, nSql+1); + startline = lineno; + } + }else{ + int len = strlen(zLine); + zSql = realloc( zSql, nSql + len + 2 ); + if( zSql==0 ){ + fprintf(stderr,"%s: out of memory!\n", Argv0); + exit(1); + } + zSql[nSql++] = '\n'; + memcpy(&zSql[nSql], zLine, len+1); + nSql += len; + } + if( zSql && _contains_semicolon(&zSql[nSqlPrior], nSql-nSqlPrior) + && sqlite3_complete(zSql) ){ + p->cnt = 0; + open_db(p); + rc = sqlite3_exec(p->db, zSql, callback, p, &zErrMsg); + if( rc || zErrMsg ){ + char zPrefix[100]; + if( in!=0 || !stdin_is_interactive ){ + sqlite3_snprintf(sizeof(zPrefix), zPrefix, + "SQL error near line %d:", startline); + }else{ + sqlite3_snprintf(sizeof(zPrefix), zPrefix, "SQL error:"); + } + if( zErrMsg!=0 ){ + printf("%s %s\n", zPrefix, zErrMsg); + sqlite3_free(zErrMsg); + zErrMsg = 0; + }else{ + printf("%s %s\n", zPrefix, sqlite3_errmsg(p->db)); + } + errCnt++; + } + free(zSql); + zSql = 0; + nSql = 0; + } + } + if( zSql ){ + if( !_all_whitespace(zSql) ) printf("Incomplete SQL: %s\n", zSql); + free(zSql); + } + free(zLine); + return errCnt; +} + +/* +** Return a pathname which is the user's home directory. A +** 0 return indicates an error of some kind. Space to hold the +** resulting string is obtained from malloc(). The calling +** function should free the result. +*/ +static char *find_home_dir(void){ + char *home_dir = NULL; + +#if !defined(_WIN32) && !defined(WIN32) && !defined(__MACOS__) && !defined(__OS2__) + struct passwd *pwent; + uid_t uid = getuid(); + if( (pwent=getpwuid(uid)) != NULL) { + home_dir = pwent->pw_dir; + } +#endif + +#ifdef __MACOS__ + char home_path[_MAX_PATH+1]; + home_dir = getcwd(home_path, _MAX_PATH); +#endif + +#if defined(_WIN32) || defined(WIN32) || defined(__OS2__) + if (!home_dir) { + home_dir = getenv("USERPROFILE"); + } +#endif + + if (!home_dir) { + home_dir = getenv("HOME"); + } + +#if defined(_WIN32) || defined(WIN32) || defined(__OS2__) + if (!home_dir) { + char *zDrive, *zPath; + int n; + zDrive = getenv("HOMEDRIVE"); + zPath = getenv("HOMEPATH"); + if( zDrive && zPath ){ + n = strlen(zDrive) + strlen(zPath) + 1; + home_dir = malloc( n ); + if( home_dir==0 ) return 0; + sqlite3_snprintf(n, home_dir, "%s%s", zDrive, zPath); + return home_dir; + } + home_dir = "c:\\"; + } +#endif + + if( home_dir ){ + int n = strlen(home_dir) + 1; + char *z = malloc( n ); + if( z ) memcpy(z, home_dir, n); + home_dir = z; + } + + return home_dir; +} + +/* +** Read input from the file given by sqliterc_override. Or if that +** parameter is NULL, take input from ~/.sqliterc +*/ +static void process_sqliterc( + struct callback_data *p, /* Configuration data */ + const char *sqliterc_override /* Name of config file. 
NULL to use default */ +){ + char *home_dir = NULL; + const char *sqliterc = sqliterc_override; + char *zBuf = 0; + FILE *in = NULL; + int nBuf; + + if (sqliterc == NULL) { + home_dir = find_home_dir(); + if( home_dir==0 ){ + fprintf(stderr,"%s: cannot locate your home directory!\n", Argv0); + return; + } + nBuf = strlen(home_dir) + 16; + zBuf = malloc( nBuf ); + if( zBuf==0 ){ + fprintf(stderr,"%s: out of memory!\n", Argv0); + exit(1); + } + sqlite3_snprintf(nBuf, zBuf,"%s/.sqliterc",home_dir); + free(home_dir); + sqliterc = (const char*)zBuf; + } + in = fopen(sqliterc,"rb"); + if( in ){ + if( stdin_is_interactive ){ + printf("-- Loading resources from %s\n",sqliterc); + } + process_input(p,in); + fclose(in); + } + free(zBuf); + return; +} + +/* +** Show available command line options +*/ +static const char zOptions[] = + " -init filename read/process named file\n" + " -echo print commands before execution\n" + " -[no]header turn headers on or off\n" + " -bail stop after hitting an error\n" + " -interactive force interactive I/O\n" + " -batch force batch I/O\n" + " -column set output mode to 'column'\n" + " -csv set output mode to 'csv'\n" + " -html set output mode to HTML\n" + " -line set output mode to 'line'\n" + " -list set output mode to 'list'\n" + " -separator 'x' set output field separator (|)\n" + " -nullvalue 'text' set text string for NULL values\n" + " -version show SQLite version\n" +; +static void usage(int showDetail){ + fprintf(stderr, + "Usage: %s [OPTIONS] FILENAME [SQL]\n" + "FILENAME is the name of an SQLite database. A new database is created\n" + "if the file does not previously exist.\n", Argv0); + if( showDetail ){ + fprintf(stderr, "OPTIONS include:\n%s", zOptions); + }else{ + fprintf(stderr, "Use the -help option for additional information\n"); + } + exit(1); +} + +/* +** Initialize the state information in data +*/ +static void main_init(struct callback_data *data) { + memset(data, 0, sizeof(*data)); + data->mode = MODE_List; + memcpy(data->separator,"|", 2); + data->showHeader = 0; + sqlite3_snprintf(sizeof(mainPrompt), mainPrompt,"sqlite> "); + sqlite3_snprintf(sizeof(continuePrompt), continuePrompt," ...> "); +} + +int main(int argc, char **argv){ + char *zErrMsg = 0; + struct callback_data data; + const char *zInitFile = 0; + char *zFirstCmd = 0; + int i; + int rc = 0; + +#ifdef __MACOS__ + argc = ccommand(&argv); +#endif + + Argv0 = argv[0]; + main_init(&data); + stdin_is_interactive = isatty(0); + + /* Make sure we have a valid signal handler early, before anything + ** else is done. + */ +#ifdef SIGINT + signal(SIGINT, interrupt_handler); +#endif + + /* Do an initial pass through the command-line argument to locate + ** the name of the database file, the name of the initialization file, + ** and the first command to execute. + */ + for(i=1; i /* Needed for the definition of va_list */ + +/* +** Make sure we can call this stuff from C++. +*/ +#ifdef __cplusplus +extern "C" { +#endif + + +/* +** Add the ability to override 'extern' +*/ +#ifndef SQLITE_EXTERN +# define SQLITE_EXTERN extern +#endif + +/* +** Make sure these symbols where not defined by some previous header +** file. +*/ +#ifdef SQLITE_VERSION +# undef SQLITE_VERSION +#endif +#ifdef SQLITE_VERSION_NUMBER +# undef SQLITE_VERSION_NUMBER +#endif + +/* +** CAPI3REF: Compile-Time Library Version Numbers +** +** The version of the SQLite library is contained in the sqlite3.h +** header file in a #define named SQLITE_VERSION. The SQLITE_VERSION +** macro resolves to a string constant. 
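+**
+** (Editor's illustrative sketch, not part of the upstream header: the
+** compile-time constants described here are typically used to guard
+** version-dependent code.)
+**
+**        #if SQLITE_VERSION_NUMBER >= 3005000
+**          /* interfaces introduced in the 3.5.x series may be used here */
+**        #endif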
+** +** The format of the version string is "X.Y.Z", where +** X is the major version number, Y is the minor version number and Z +** is the release number. The X.Y.Z might be followed by "alpha" or "beta". +** For example "3.1.1beta". +** +** The X value is always 3 in SQLite. The X value only changes when +** backwards compatibility is broken and we intend to never break +** backwards compatibility. The Y value only changes when +** there are major feature enhancements that are forwards compatible +** but not backwards compatible. The Z value is incremented with +** each release but resets back to 0 when Y is incremented. +** +** The SQLITE_VERSION_NUMBER is an integer with the value +** (X*1000000 + Y*1000 + Z). For example, for version "3.1.1beta", +** SQLITE_VERSION_NUMBER is set to 3001001. To detect if they are using +** version 3.1.1 or greater at compile time, programs may use the test +** (SQLITE_VERSION_NUMBER>=3001001). +** +** See also: [sqlite3_libversion()] and [sqlite3_libversion_number()]. +*/ +#define SQLITE_VERSION "--VERS--" +#define SQLITE_VERSION_NUMBER --VERSION-NUMBER-- + +/* +** CAPI3REF: Run-Time Library Version Numbers +** +** These routines return values equivalent to the header constants +** [SQLITE_VERSION] and [SQLITE_VERSION_NUMBER]. The values returned +** by this routines should only be different from the header values +** if you compile your program using an sqlite3.h header from a +** different version of SQLite that the version of the library you +** link against. +** +** The sqlite3_version[] string constant contains the text of the +** [SQLITE_VERSION] string. The sqlite3_libversion() function returns +** a poiner to the sqlite3_version[] string constant. The function +** is provided for DLL users who can only access functions and not +** constants within the DLL. +*/ +SQLITE_EXTERN const char sqlite3_version[]; +const char *sqlite3_libversion(void); +int sqlite3_libversion_number(void); + +/* +** CAPI3REF: Test To See If The Library Is Threadsafe +** +** This routine returns TRUE (nonzero) if SQLite was compiled with +** all of its mutexes enabled and is thus threadsafe. It returns +** zero if the particular build is for single-threaded operation +** only. +** +** Really all this routine does is return true if SQLite was compiled +** with the -DSQLITE_THREADSAFE=1 option and false if +** compiled with -DSQLITE_THREADSAFE=0. If SQLite uses an +** application-defined mutex subsystem, malloc subsystem, collating +** sequence, VFS, SQL function, progress callback, commit hook, +** extension, or other accessories and these add-ons are not +** threadsafe, then clearly the combination will not be threadsafe +** either. Hence, this routine never reports that the library +** is guaranteed to be threadsafe, only when it is guaranteed not +** to be. +** +** This is an experimental API and may go away or change in future +** releases. +*/ +int sqlite3_threadsafe(void); + +/* +** CAPI3REF: Database Connection Handle +** +** Each open SQLite database is represented by pointer to an instance of the +** opaque structure named "sqlite3". It is useful to think of an sqlite3 +** pointer as an object. The [sqlite3_open()], [sqlite3_open16()], and +** [sqlite3_open_v2()] interfaces are its constructors +** and [sqlite3_close()] is its destructor. There are many other interfaces +** (such as [sqlite3_prepare_v2()], [sqlite3_create_function()], and +** [sqlite3_busy_timeout()] to name but three) that are methods on this +** object. 
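+**
+** (Editor's illustrative sketch, not part of the upstream header: the usual
+** constructor/destructor pairing for this object, using a hypothetical
+** database file name "test.db".)
+**
+**        sqlite3 *db = 0;
+**        if( sqlite3_open("test.db", &db)==SQLITE_OK ){
+**          /* ... use the connection through the methods listed above ... */
+**        }
+**        sqlite3_close(db);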
+*/ +typedef struct sqlite3 sqlite3; + + +/* +** CAPI3REF: 64-Bit Integer Types +** +** Some compilers do not support the "long long" datatype. So we have +** to do compiler-specific typedefs for 64-bit signed and unsigned integers. +** +** Many SQLite interface functions require a 64-bit integer arguments. +** Those interfaces are declared using this typedef. +*/ +#ifdef SQLITE_INT64_TYPE + typedef SQLITE_INT64_TYPE sqlite_int64; + typedef unsigned SQLITE_INT64_TYPE sqlite_uint64; +#elif defined(_MSC_VER) || defined(__BORLANDC__) + typedef __int64 sqlite_int64; + typedef unsigned __int64 sqlite_uint64; +#else + typedef long long int sqlite_int64; + typedef unsigned long long int sqlite_uint64; +#endif +typedef sqlite_int64 sqlite3_int64; +typedef sqlite_uint64 sqlite3_uint64; + +/* +** If compiling for a processor that lacks floating point support, +** substitute integer for floating-point +*/ +#ifdef SQLITE_OMIT_FLOATING_POINT +# define double sqlite3_int64 +#endif + +/* +** CAPI3REF: Closing A Database Connection +** +** Call this function with a pointer to a structure that was previously +** returned from [sqlite3_open()], [sqlite3_open16()], or +** [sqlite3_open_v2()] and the corresponding database will by +** closed. +** +** All SQL statements prepared using [sqlite3_prepare_v2()] or +** [sqlite3_prepare16_v2()] must be destroyed using [sqlite3_finalize()] +** before this routine is called. Otherwise, SQLITE_BUSY is returned and the +** database connection remains open. +** +** Passing this routine a database connection that has already been +** closed results in undefined behavior. If other interfaces that +** reference the same database connection are pending (either in the +** same thread or in different threads) when this routine is called, +** then the behavior is undefined and is almost certainly undesirable. +*/ +int sqlite3_close(sqlite3 *); + +/* +** The type for a callback function. +** This is legacy and deprecated. It is included for historical +** compatibility and is not documented. +*/ +typedef int (*sqlite3_callback)(void*,int,char**, char**); + +/* +** CAPI3REF: One-Step Query Execution Interface +** +** This interface is used to do a one-time evaluatation of zero +** or more SQL statements. UTF-8 text of the SQL statements to +** be evaluted is passed in as the second parameter. The statements +** are prepared one by one using [sqlite3_prepare()], evaluated +** using [sqlite3_step()], then destroyed using [sqlite3_finalize()]. +** +** If one or more of the SQL statements are queries, then +** the callback function specified by the 3rd parameter is +** invoked once for each row of the query result. This callback +** should normally return 0. If the callback returns a non-zero +** value then the query is aborted, all subsequent SQL statements +** are skipped and the sqlite3_exec() function returns the [SQLITE_ABORT]. +** +** The 4th parameter to this interface is an arbitrary pointer that is +** passed through to the callback function as its first parameter. +** +** The 2nd parameter to the callback function is the number of +** columns in the query result. The 3rd parameter to the callback +** is an array of strings holding the values for each column +** as extracted using [sqlite3_column_text()]. +** The 4th parameter to the callback is an array of strings +** obtained using [sqlite3_column_name()] and holding +** the names of each column. +** +** The callback function may be NULL, even for queries. A NULL +** callback is not an error. 
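+**
+** (Editor's illustrative sketch, not part of the upstream header: a minimal
+** row-printing callback for the interface described in this comment,
+** assuming an open handle "db" and a table "t1".)
+**
+**        static int print_row(void *pNotUsed, int nCol, char **azVal, char **azColName){
+**          int i;
+**          for(i=0; i<nCol; i++){
+**            printf("%s = %s\n", azColName[i], azVal[i] ? azVal[i] : "NULL");
+**          }
+**          return 0;      /* a non-zero return would abort with SQLITE_ABORT */
+**        }
+**
+**        char *zErr = 0;
+**        if( sqlite3_exec(db, "SELECT * FROM t1", print_row, 0, &zErr)!=SQLITE_OK ){
+**          fprintf(stderr, "SQL error: %s\n", zErr ? zErr : "unknown");
+**          sqlite3_free(zErr);
+**        }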
It just means that no callback +** will be invoked. +** +** If an error occurs while parsing or evaluating the SQL (but +** not while executing the callback) then an appropriate error +** message is written into memory obtained from [sqlite3_malloc()] and +** *errmsg is made to point to that message. The calling function +** is responsible for freeing the memory using [sqlite3_free()]. +** If errmsg==NULL, then no error message is ever written. +** +** The return value is is SQLITE_OK if there are no errors and +** some other [SQLITE_OK | return code] if there is an error. +** The particular return value depends on the type of error. +** +*/ +int sqlite3_exec( + sqlite3*, /* An open database */ + const char *sql, /* SQL to be evaluted */ + int (*callback)(void*,int,char**,char**), /* Callback function */ + void *, /* 1st argument to callback */ + char **errmsg /* Error msg written here */ +); + +/* +** CAPI3REF: Result Codes +** KEYWORDS: SQLITE_OK +** +** Many SQLite functions return an integer result code from the set shown +** above in order to indicates success or failure. +** +** The result codes above are the only ones returned by SQLite in its +** default configuration. However, the [sqlite3_extended_result_codes()] +** API can be used to set a database connectoin to return more detailed +** result codes. +** +** See also: [SQLITE_IOERR_READ | extended result codes] +** +*/ +#define SQLITE_OK 0 /* Successful result */ +/* beginning-of-error-codes */ +#define SQLITE_ERROR 1 /* SQL error or missing database */ +#define SQLITE_INTERNAL 2 /* NOT USED. Internal logic error in SQLite */ +#define SQLITE_PERM 3 /* Access permission denied */ +#define SQLITE_ABORT 4 /* Callback routine requested an abort */ +#define SQLITE_BUSY 5 /* The database file is locked */ +#define SQLITE_LOCKED 6 /* A table in the database is locked */ +#define SQLITE_NOMEM 7 /* A malloc() failed */ +#define SQLITE_READONLY 8 /* Attempt to write a readonly database */ +#define SQLITE_INTERRUPT 9 /* Operation terminated by sqlite3_interrupt()*/ +#define SQLITE_IOERR 10 /* Some kind of disk I/O error occurred */ +#define SQLITE_CORRUPT 11 /* The database disk image is malformed */ +#define SQLITE_NOTFOUND 12 /* NOT USED. Table or record not found */ +#define SQLITE_FULL 13 /* Insertion failed because database is full */ +#define SQLITE_CANTOPEN 14 /* Unable to open the database file */ +#define SQLITE_PROTOCOL 15 /* NOT USED. Database lock protocol error */ +#define SQLITE_EMPTY 16 /* Database is empty */ +#define SQLITE_SCHEMA 17 /* The database schema changed */ +#define SQLITE_TOOBIG 18 /* String or BLOB exceeds size limit */ +#define SQLITE_CONSTRAINT 19 /* Abort due to constraint violation */ +#define SQLITE_MISMATCH 20 /* Data type mismatch */ +#define SQLITE_MISUSE 21 /* Library used incorrectly */ +#define SQLITE_NOLFS 22 /* Uses OS features not supported on host */ +#define SQLITE_AUTH 23 /* Authorization denied */ +#define SQLITE_FORMAT 24 /* Auxiliary database format error */ +#define SQLITE_RANGE 25 /* 2nd parameter to sqlite3_bind out of range */ +#define SQLITE_NOTADB 26 /* File opened that is not a database file */ +#define SQLITE_ROW 100 /* sqlite3_step() has another row ready */ +#define SQLITE_DONE 101 /* sqlite3_step() has finished executing */ +/* end-of-error-codes */ + +/* +** CAPI3REF: Extended Result Codes +** +** In its default configuration, SQLite API routines return one of 26 integer +** result codes described at result-codes. 
However, experience has shown that +** many of these result codes are too course-grained. They do not provide as +** much information about problems as users might like. In an effort to +** address this, newer versions of SQLite (version 3.3.8 and later) include +** support for additional result codes that provide more detailed information +** about errors. The extended result codes are enabled (or disabled) for +** each database +** connection using the [sqlite3_extended_result_codes()] API. +** +** Some of the available extended result codes are listed above. +** We expect the number of extended result codes will be expand +** over time. Software that uses extended result codes should expect +** to see new result codes in future releases of SQLite. +** +** The symbolic name for an extended result code always contains a related +** primary result code as a prefix. Primary result codes contain a single +** "_" character. Extended result codes contain two or more "_" characters. +** The numeric value of an extended result code can be converted to its +** corresponding primary result code by masking off the lower 8 bytes. +** +** The SQLITE_OK result code will never be extended. It will always +** be exactly zero. +*/ +#define SQLITE_IOERR_READ (SQLITE_IOERR | (1<<8)) +#define SQLITE_IOERR_SHORT_READ (SQLITE_IOERR | (2<<8)) +#define SQLITE_IOERR_WRITE (SQLITE_IOERR | (3<<8)) +#define SQLITE_IOERR_FSYNC (SQLITE_IOERR | (4<<8)) +#define SQLITE_IOERR_DIR_FSYNC (SQLITE_IOERR | (5<<8)) +#define SQLITE_IOERR_TRUNCATE (SQLITE_IOERR | (6<<8)) +#define SQLITE_IOERR_FSTAT (SQLITE_IOERR | (7<<8)) +#define SQLITE_IOERR_UNLOCK (SQLITE_IOERR | (8<<8)) +#define SQLITE_IOERR_RDLOCK (SQLITE_IOERR | (9<<8)) +#define SQLITE_IOERR_DELETE (SQLITE_IOERR | (10<<8)) +#define SQLITE_IOERR_BLOCKED (SQLITE_IOERR | (11<<8)) +#define SQLITE_IOERR_NOMEM (SQLITE_IOERR | (12<<8)) + +/* +** CAPI3REF: Flags For File Open Operations +** +** Combination of the following bit values are used as the +** third argument to the [sqlite3_open_v2()] interface and +** as fourth argument to the xOpen method of the +** [sqlite3_vfs] object. +** +*/ +#define SQLITE_OPEN_READONLY 0x00000001 +#define SQLITE_OPEN_READWRITE 0x00000002 +#define SQLITE_OPEN_CREATE 0x00000004 +#define SQLITE_OPEN_DELETEONCLOSE 0x00000008 +#define SQLITE_OPEN_EXCLUSIVE 0x00000010 +#define SQLITE_OPEN_MAIN_DB 0x00000100 +#define SQLITE_OPEN_TEMP_DB 0x00000200 +#define SQLITE_OPEN_TRANSIENT_DB 0x00000400 +#define SQLITE_OPEN_MAIN_JOURNAL 0x00000800 +#define SQLITE_OPEN_TEMP_JOURNAL 0x00001000 +#define SQLITE_OPEN_SUBJOURNAL 0x00002000 +#define SQLITE_OPEN_MASTER_JOURNAL 0x00004000 + +/* +** CAPI3REF: Device Characteristics +** +** The xDeviceCapabilities method of the [sqlite3_io_methods] +** object returns an integer which is a vector of the following +** bit values expressing I/O characteristics of the mass storage +** device that holds the file that the [sqlite3_io_methods] +** refers to. +** +** The SQLITE_IOCAP_ATOMIC property means that all writes of +** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values +** mean that writes of blocks that are nnn bytes in size and +** are aligned to an address which is an integer multiple of +** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means +** that when data is appended to a file, the data is appended +** first then the size of the file is extended, never the other +** way around. The SQLITE_IOCAP_SEQUENTIAL property means that +** information is written to disk in the same order as calls +** to xWrite(). 
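+**
+** (Editor's illustrative sketch, not part of the upstream header: testing one
+** of these capability bits on an open sqlite3_file, here called "pFile".)
+**
+**        int caps = pFile->pMethods->xDeviceCharacteristics(pFile);
+**        if( caps & SQLITE_IOCAP_SAFE_APPEND ){
+**          /* appended data is never visible before the file size grows */
+**        }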
+*/ +#define SQLITE_IOCAP_ATOMIC 0x00000001 +#define SQLITE_IOCAP_ATOMIC512 0x00000002 +#define SQLITE_IOCAP_ATOMIC1K 0x00000004 +#define SQLITE_IOCAP_ATOMIC2K 0x00000008 +#define SQLITE_IOCAP_ATOMIC4K 0x00000010 +#define SQLITE_IOCAP_ATOMIC8K 0x00000020 +#define SQLITE_IOCAP_ATOMIC16K 0x00000040 +#define SQLITE_IOCAP_ATOMIC32K 0x00000080 +#define SQLITE_IOCAP_ATOMIC64K 0x00000100 +#define SQLITE_IOCAP_SAFE_APPEND 0x00000200 +#define SQLITE_IOCAP_SEQUENTIAL 0x00000400 + +/* +** CAPI3REF: File Locking Levels +** +** SQLite uses one of the following integer values as the second +** argument to calls it makes to the xLock() and xUnlock() methods +** of an [sqlite3_io_methods] object. +*/ +#define SQLITE_LOCK_NONE 0 +#define SQLITE_LOCK_SHARED 1 +#define SQLITE_LOCK_RESERVED 2 +#define SQLITE_LOCK_PENDING 3 +#define SQLITE_LOCK_EXCLUSIVE 4 + +/* +** CAPI3REF: Synchronization Type Flags +** +** When SQLite invokes the xSync() method of an [sqlite3_io_methods] +** object it uses a combination of the following integer values as +** the second argument. +** +** When the SQLITE_SYNC_DATAONLY flag is used, it means that the +** sync operation only needs to flush data to mass storage. Inode +** information need not be flushed. The SQLITE_SYNC_NORMAL means +** to use normal fsync() semantics. The SQLITE_SYNC_FULL flag means +** to use Mac OS-X style fullsync instead of fsync(). +*/ +#define SQLITE_SYNC_NORMAL 0x00002 +#define SQLITE_SYNC_FULL 0x00003 +#define SQLITE_SYNC_DATAONLY 0x00010 + + +/* +** CAPI3REF: OS Interface Open File Handle +** +** An [sqlite3_file] object represents an open file in the OS +** interface layer. Individual OS interface implementations will +** want to subclass this object by appending additional fields +** for their own use. The pMethods entry is a pointer to an +** [sqlite3_io_methods] object that defines methods for performing +** I/O operations on the open file. +*/ +typedef struct sqlite3_file sqlite3_file; +struct sqlite3_file { + const struct sqlite3_io_methods *pMethods; /* Methods for an open file */ +}; + +/* +** CAPI3REF: OS Interface File Virtual Methods Object +** +** Every file opened by the [sqlite3_vfs] xOpen method contains a pointer to +** an instance of the this object. This object defines the +** methods used to perform various operations against the open file. +** +** The flags argument to xSync may be one of [SQLITE_SYNC_NORMAL] or +** [SQLITE_SYNC_FULL]. The first choice is the normal fsync(). +* The second choice is an +** OS-X style fullsync. The SQLITE_SYNC_DATA flag may be ORed in to +** indicate that only the data of the file and not its inode needs to be +** synced. +** +** The integer values to xLock() and xUnlock() are one of +**
+**
+**   • [SQLITE_LOCK_NONE],
+**   • [SQLITE_LOCK_SHARED],
+**   • [SQLITE_LOCK_RESERVED],
+**   • [SQLITE_LOCK_PENDING], or
+**   • [SQLITE_LOCK_EXCLUSIVE].
+**
+** xLock() increases the lock. xUnlock() decreases the lock. +** The xCheckReservedLock() method looks +** to see if any database connection, either in this +** process or in some other process, is holding an RESERVED, +** PENDING, or EXCLUSIVE lock on the file. It returns true +** if such a lock exists and false if not. +** +** The xFileControl() method is a generic interface that allows custom +** VFS implementations to directly control an open file using the +** [sqlite3_file_control()] interface. The second "op" argument +** is an integer opcode. The third +** argument is a generic pointer which is intended to be a pointer +** to a structure that may contain arguments or space in which to +** write return values. Potential uses for xFileControl() might be +** functions to enable blocking locks with timeouts, to change the +** locking strategy (for example to use dot-file locks), to inquire +** about the status of a lock, or to break stale locks. The SQLite +** core reserves opcodes less than 100 for its own use. +** A [SQLITE_FCNTL_LOCKSTATE | list of opcodes] less than 100 is available. +** Applications that define a custom xFileControl method should use opcodes +** greater than 100 to avoid conflicts. +** +** The xSectorSize() method returns the sector size of the +** device that underlies the file. The sector size is the +** minimum write that can be performed without disturbing +** other bytes in the file. The xDeviceCharacteristics() +** method returns a bit vector describing behaviors of the +** underlying device: +** +**
+**
+**   • [SQLITE_IOCAP_ATOMIC]
+**   • [SQLITE_IOCAP_ATOMIC512]
+**   • [SQLITE_IOCAP_ATOMIC1K]
+**   • [SQLITE_IOCAP_ATOMIC2K]
+**   • [SQLITE_IOCAP_ATOMIC4K]
+**   • [SQLITE_IOCAP_ATOMIC8K]
+**   • [SQLITE_IOCAP_ATOMIC16K]
+**   • [SQLITE_IOCAP_ATOMIC32K]
+**   • [SQLITE_IOCAP_ATOMIC64K]
+**   • [SQLITE_IOCAP_SAFE_APPEND]
+**   • [SQLITE_IOCAP_SEQUENTIAL]
+** +** The SQLITE_IOCAP_ATOMIC property means that all writes of +** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values +** mean that writes of blocks that are nnn bytes in size and +** are aligned to an address which is an integer multiple of +** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means +** that when data is appended to a file, the data is appended +** first then the size of the file is extended, never the other +** way around. The SQLITE_IOCAP_SEQUENTIAL property means that +** information is written to disk in the same order as calls +** to xWrite(). +*/ +typedef struct sqlite3_io_methods sqlite3_io_methods; +struct sqlite3_io_methods { + int iVersion; + int (*xClose)(sqlite3_file*); + int (*xRead)(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); + int (*xWrite)(sqlite3_file*, const void*, int iAmt, sqlite3_int64 iOfst); + int (*xTruncate)(sqlite3_file*, sqlite3_int64 size); + int (*xSync)(sqlite3_file*, int flags); + int (*xFileSize)(sqlite3_file*, sqlite3_int64 *pSize); + int (*xLock)(sqlite3_file*, int); + int (*xUnlock)(sqlite3_file*, int); + int (*xCheckReservedLock)(sqlite3_file*); + int (*xFileControl)(sqlite3_file*, int op, void *pArg); + int (*xSectorSize)(sqlite3_file*); + int (*xDeviceCharacteristics)(sqlite3_file*); + /* Additional methods may be added in future releases */ +}; + +/* +** CAPI3REF: Standard File Control Opcodes +** +** These integer constants are opcodes for the xFileControl method +** of the [sqlite3_io_methods] object and to the [sqlite3_file_control()] +** interface. +** +** The [SQLITE_FCNTL_LOCKSTATE] opcode is used for debugging. This +** opcode cases the xFileControl method to write the current state of +** the lock (one of [SQLITE_LOCK_NONE], [SQLITE_LOCK_SHARED], +** [SQLITE_LOCK_RESERVED], [SQLITE_LOCK_PENDING], or [SQLITE_LOCK_EXCLUSIVE]) +** into an integer that the pArg argument points to. This capability +** is used during testing and only needs to be supported when SQLITE_TEST +** is defined. +*/ +#define SQLITE_FCNTL_LOCKSTATE 1 + +/* +** CAPI3REF: Mutex Handle +** +** The mutex module within SQLite defines [sqlite3_mutex] to be an +** abstract type for a mutex object. The SQLite core never looks +** at the internal representation of an [sqlite3_mutex]. It only +** deals with pointers to the [sqlite3_mutex] object. +** +** Mutexes are created using [sqlite3_mutex_alloc()]. +*/ +typedef struct sqlite3_mutex sqlite3_mutex; + +/* +** CAPI3REF: OS Interface Object +** +** An instance of this object defines the interface between the +** SQLite core and the underlying operating system. The "vfs" +** in the name of the object stands for "virtual file system". +** +** The iVersion field is initially 1 but may be larger for future +** versions of SQLite. Additional fields may be appended to this +** object when the iVersion value is increased. +** +** The szOsFile field is the size of the subclassed [sqlite3_file] +** structure used by this VFS. mxPathname is the maximum length of +** a pathname in this VFS. +** +** Registered vfs modules are kept on a linked list formed by +** the pNext pointer. The [sqlite3_vfs_register()] +** and [sqlite3_vfs_unregister()] interfaces manage this list +** in a thread-safe way. The [sqlite3_vfs_find()] interface +** searches the list. +** +** The pNext field is the only fields in the sqlite3_vfs +** structure that SQLite will ever modify. SQLite will only access +** or modify this field while holding a particular static mutex. 
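+**
+** (Editor's illustrative sketch, not part of the upstream header: placing a
+** hypothetical VFS object "my_vfs", whose zName is assumed to be "myvfs",
+** on the list described above and then looking it up again.)
+**
+**        extern sqlite3_vfs my_vfs;
+**        sqlite3_vfs_register(&my_vfs, 0);        /* 0: do not make it the default */
+**        sqlite3_vfs *pVfs = sqlite3_vfs_find("myvfs");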
+** The application should never modify anything within the sqlite3_vfs +** object once the object has been registered. +** +** The zName field holds the name of the VFS module. The name must +** be unique across all VFS modules. +** +** SQLite will guarantee that the zFilename string passed to +** xOpen() is a full pathname as generated by xFullPathname() and +** that the string will be valid and unchanged until xClose() is +** called. So the [sqlite3_file] can store a pointer to the +** filename if it needs to remember the filename for some reason. +** +** The flags argument to xOpen() is a copy of the flags argument +** to [sqlite3_open_v2()]. If [sqlite3_open()] or [sqlite3_open16()] +** is used, then flags is [SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]. +** If xOpen() opens a file read-only then it sets *pOutFlags to +** include [SQLITE_OPEN_READONLY]. Other bits in *pOutFlags may be +** set. +** +** SQLite will also add one of the following flags to the xOpen() +** call, depending on the object being opened: +** +**
+**
+**   • [SQLITE_OPEN_MAIN_DB]
+**   • [SQLITE_OPEN_MAIN_JOURNAL]
+**   • [SQLITE_OPEN_TEMP_DB]
+**   • [SQLITE_OPEN_TEMP_JOURNAL]
+**   • [SQLITE_OPEN_TRANSIENT_DB]
+**   • [SQLITE_OPEN_SUBJOURNAL]
+**   • [SQLITE_OPEN_MASTER_JOURNAL]
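+**
+** (Editor's illustrative sketch, not part of the upstream header: an xOpen
+** implementation in a custom VFS may branch on these object-type flags, as
+** the next paragraph describes.)
+**
+**        static int myOpen(sqlite3_vfs *pVfs, const char *zName,
+**                          sqlite3_file *pFile, int flags, int *pOutFlags){
+**          if( flags & SQLITE_OPEN_MAIN_JOURNAL ){
+**            /* a VFS that forgoes rollback support could make this a no-op */
+**          }
+**          /* ... open the file and fill in pFile->pMethods ... */
+**          return SQLITE_OK;
+**        }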
+** +** The file I/O implementation can use the object type flags to +** changes the way it deals with files. For example, an application +** that does not care about crash recovery or rollback, might make +** the open of a journal file a no-op. Writes to this journal are +** also a no-op. Any attempt to read the journal return SQLITE_IOERR. +** Or the implementation might recognize the a database file will +** be doing page-aligned sector reads and writes in a random order +** and set up its I/O subsystem accordingly. +** +** SQLite might also add one of the following flags to the xOpen +** method: +** +**
+**
+**   • [SQLITE_OPEN_DELETEONCLOSE]
+**   • [SQLITE_OPEN_EXCLUSIVE]
+** +** The [SQLITE_OPEN_DELETEONCLOSE] flag means the file should be +** deleted when it is closed. This will always be set for TEMP +** databases and journals and for subjournals. The +** [SQLITE_OPEN_EXCLUSIVE] flag means the file should be opened +** for exclusive access. This flag is set for all files except +** for the main database file. +** +** Space to hold the [sqlite3_file] structure passed as the third +** argument to xOpen is allocated by caller (the SQLite core). +** szOsFile bytes are allocated for this object. The xOpen method +** fills in the allocated space. +** +** The flags argument to xAccess() may be [SQLITE_ACCESS_EXISTS] +** to test for the existance of a file, +** or [SQLITE_ACCESS_READWRITE] to test to see +** if a file is readable and writable, or [SQLITE_ACCESS_READ] +** to test to see if a file is at least readable. The file can be a +** directory. +** +** SQLite will always allocate at least mxPathname+1 byte for +** the output buffers for xGetTempname and xFullPathname. The exact +** size of the output buffer is also passed as a parameter to both +** methods. If the output buffer is not large enough, SQLITE_CANTOPEN +** should be returned. As this is handled as a fatal error by SQLite, +** vfs implementations should endevour to prevent this by setting +** mxPathname to a sufficiently large value. +** +** The xRandomness(), xSleep(), and xCurrentTime() interfaces +** are not strictly a part of the filesystem, but they are +** included in the VFS structure for completeness. +** The xRandomness() function attempts to return nBytes bytes +** of good-quality randomness into zOut. The return value is +** the actual number of bytes of randomness obtained. The +** xSleep() method cause the calling thread to sleep for at +** least the number of microseconds given. The xCurrentTime() +** method returns a Julian Day Number for the current date and +** time. +*/ +typedef struct sqlite3_vfs sqlite3_vfs; +struct sqlite3_vfs { + int iVersion; /* Structure version number */ + int szOsFile; /* Size of subclassed sqlite3_file */ + int mxPathname; /* Maximum file pathname length */ + sqlite3_vfs *pNext; /* Next registered VFS */ + const char *zName; /* Name of this virtual file system */ + void *pAppData; /* Pointer to application-specific data */ + int (*xOpen)(sqlite3_vfs*, const char *zName, sqlite3_file*, + int flags, int *pOutFlags); + int (*xDelete)(sqlite3_vfs*, const char *zName, int syncDir); + int (*xAccess)(sqlite3_vfs*, const char *zName, int flags); + int (*xGetTempname)(sqlite3_vfs*, int nOut, char *zOut); + int (*xFullPathname)(sqlite3_vfs*, const char *zName, int nOut, char *zOut); + void *(*xDlOpen)(sqlite3_vfs*, const char *zFilename); + void (*xDlError)(sqlite3_vfs*, int nByte, char *zErrMsg); + void *(*xDlSym)(sqlite3_vfs*,void*, const char *zSymbol); + void (*xDlClose)(sqlite3_vfs*, void*); + int (*xRandomness)(sqlite3_vfs*, int nByte, char *zOut); + int (*xSleep)(sqlite3_vfs*, int microseconds); + int (*xCurrentTime)(sqlite3_vfs*, double*); + /* New fields may be appended in figure versions. The iVersion + ** value will increment whenever this happens. */ +}; + +/* +** CAPI3REF: Flags for the xAccess VFS method +** +** These integer constants can be used as the third parameter to +** the xAccess method of an [sqlite3_vfs] object. They determine +** the kind of what kind of permissions the xAccess method is +** looking for. With SQLITE_ACCESS_EXISTS, the xAccess method +** simply checks to see if the file exists. 
With SQLITE_ACCESS_READWRITE, +** the xAccess method checks to see if the file is both readable +** and writable. With SQLITE_ACCESS_READ the xAccess method +** checks to see if the file is readable. +*/ +#define SQLITE_ACCESS_EXISTS 0 +#define SQLITE_ACCESS_READWRITE 1 +#define SQLITE_ACCESS_READ 2 + +/* +** CAPI3REF: Enable Or Disable Extended Result Codes +** +** This routine enables or disables the +** [SQLITE_IOERR_READ | extended result codes] feature. +** By default, SQLite API routines return one of only 26 integer +** [SQLITE_OK | result codes]. When extended result codes +** are enabled by this routine, the repetoire of result codes can be +** much larger and can (hopefully) provide more detailed information +** about the cause of an error. +** +** The second argument is a boolean value that turns extended result +** codes on and off. Extended result codes are off by default for +** backwards compatibility with older versions of SQLite. +*/ +int sqlite3_extended_result_codes(sqlite3*, int onoff); + +/* +** CAPI3REF: Last Insert Rowid +** +** Each entry in an SQLite table has a unique 64-bit signed integer key +** called the "rowid". The rowid is always available as an undeclared +** column named ROWID, OID, or _ROWID_. If the table has a column of +** type INTEGER PRIMARY KEY then that column is another an alias for the +** rowid. +** +** This routine returns the rowid of the most recent INSERT into +** the database from the database connection given in the first +** argument. If no inserts have ever occurred on this database +** connection, zero is returned. +** +** If an INSERT occurs within a trigger, then the rowid of the +** inserted row is returned by this routine as long as the trigger +** is running. But once the trigger terminates, the value returned +** by this routine reverts to the last value inserted before the +** trigger fired. +** +** If another thread does a new insert on the same database connection +** while this routine is running and thus changes the last insert rowid, +** then the return value of this routine is undefined. +*/ +sqlite3_int64 sqlite3_last_insert_rowid(sqlite3*); + +/* +** CAPI3REF: Count The Number Of Rows Modified +** +** This function returns the number of database rows that were changed +** (or inserted or deleted) by the most recent SQL statement. Only +** changes that are directly specified by the INSERT, UPDATE, or +** DELETE statement are counted. Auxiliary changes caused by +** triggers are not counted. Use the [sqlite3_total_changes()] function +** to find the total number of changes including changes caused by triggers. +** +** Within the body of a trigger, the sqlite3_changes() interface can be +** called to find the number of +** changes in the most recently completed INSERT, UPDATE, or DELETE +** statement within the body of the trigger. +** +** All changes are counted, even if they were later undone by a +** ROLLBACK or ABORT. Except, changes associated with creating and +** dropping tables are not counted. +** +** If a callback invokes [sqlite3_exec()] or [sqlite3_step()] recursively, +** then the changes in the inner, recursive call are counted together +** with the changes in the outer call. +** +** SQLite implements the command "DELETE FROM table" without a WHERE clause +** by dropping and recreating the table. (This is much faster than going +** through and deleting individual elements from the table.) 
Because of +** this optimization, the change count for "DELETE FROM table" will be +** zero regardless of the number of elements that were originally in the +** table. To get an accurate count of the number of rows deleted, use +** "DELETE FROM table WHERE 1" instead. +** +** If another thread makes changes on the same database connection +** while this routine is running then the return value of this routine +** is undefined. +*/ +int sqlite3_changes(sqlite3*); + +/* +** CAPI3REF: Total Number Of Rows Modified +*** +** This function returns the number of database rows that have been +** modified by INSERT, UPDATE or DELETE statements since the database handle +** was opened. This includes UPDATE, INSERT and DELETE statements executed +** as part of trigger programs. All changes are counted as soon as the +** statement that makes them is completed (when the statement handle is +** passed to [sqlite3_reset()] or [sqlite3_finalize()]). +** +** See also the [sqlite3_change()] interface. +** +** SQLite implements the command "DELETE FROM table" without a WHERE clause +** by dropping and recreating the table. (This is much faster than going +** through and deleting individual elements form the table.) Because of +** this optimization, the change count for "DELETE FROM table" will be +** zero regardless of the number of elements that were originally in the +** table. To get an accurate count of the number of rows deleted, use +** "DELETE FROM table WHERE 1" instead. +** +** If another thread makes changes on the same database connection +** while this routine is running then the return value of this routine +** is undefined. +*/ +int sqlite3_total_changes(sqlite3*); + +/* +** CAPI3REF: Interrupt A Long-Running Query +** +** This function causes any pending database operation to abort and +** return at its earliest opportunity. This routine is typically +** called in response to a user action such as pressing "Cancel" +** or Ctrl-C where the user wants a long query operation to halt +** immediately. +** +** It is safe to call this routine from a thread different from the +** thread that is currently running the database operation. But it +** is not safe to call this routine with a database connection that +** is closed or might close before sqlite3_interrupt() returns. +** +** The SQL operation that is interrupted will return [SQLITE_INTERRUPT]. +** If an interrupted operation was an update that is inside an +** explicit transaction, then the entire transaction will be rolled +** back automatically. +*/ +void sqlite3_interrupt(sqlite3*); + +/* +** CAPI3REF: Determine If An SQL Statement Is Complete +** +** These functions return true if the given input string comprises +** one or more complete SQL statements. For the sqlite3_complete() call, +** the parameter must be a nul-terminated UTF-8 string. For +** sqlite3_complete16(), a nul-terminated machine byte order UTF-16 string +** is required. +** +** These routines are useful for command-line input to determine if the +** currently entered text forms one or more complete SQL statements or +** if additional input is needed before sending the statements into +** SQLite for parsing. The algorithm is simple. If the +** last token other than spaces and comments is a semicolon, then return +** true. Actually, the algorithm is a little more complicated than that +** in order to deal with triggers, but the basic idea is the same: the +** statement is not complete unless it ends in a semicolon. 
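+**
+** (Editor's illustrative sketch, not part of the upstream header.)
+**
+**        sqlite3_complete("SELECT * FROM t1")     /* 0: no terminating ";"  */
+**        sqlite3_complete("SELECT * FROM t1;")    /* non-zero: complete     */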
+*/ +int sqlite3_complete(const char *sql); +int sqlite3_complete16(const void *sql); + +/* +** CAPI3REF: Register A Callback To Handle SQLITE_BUSY Errors +** +** This routine identifies a callback function that might be invoked +** whenever an attempt is made to open a database table +** that another thread or process has locked. +** If the busy callback is NULL, then [SQLITE_BUSY] +** (or sometimes [SQLITE_IOERR_BLOCKED]) +** is returned immediately upon encountering the lock. +** If the busy callback is not NULL, then the +** callback will be invoked with two arguments. The +** first argument to the handler is a copy of the void* pointer which +** is the third argument to this routine. The second argument to +** the handler is the number of times that the busy handler has +** been invoked for this locking event. If the +** busy callback returns 0, then no additional attempts are made to +** access the database and [SQLITE_BUSY] or [SQLITE_IOERR_BLOCKED] is returned. +** If the callback returns non-zero, then another attempt is made to open the +** database for reading and the cycle repeats. +** +** The presence of a busy handler does not guarantee that +** it will be invoked when there is lock contention. +** If SQLite determines that invoking the busy handler could result in +** a deadlock, it will return [SQLITE_BUSY] instead. +** Consider a scenario where one process is holding a read lock that +** it is trying to promote to a reserved lock and +** a second process is holding a reserved lock that it is trying +** to promote to an exclusive lock. The first process cannot proceed +** because it is blocked by the second and the second process cannot +** proceed because it is blocked by the first. If both processes +** invoke the busy handlers, neither will make any progress. Therefore, +** SQLite returns [SQLITE_BUSY] for the first process, hoping that this +** will induce the first process to release its read lock and allow +** the second process to proceed. +** +** The default busy callback is NULL. +** +** The [SQLITE_BUSY] error is converted to [SQLITE_IOERR_BLOCKED] when +** SQLite is in the middle of a large transaction where all the +** changes will not fit into the in-memory cache. SQLite will +** already hold a RESERVED lock on the database file, but it needs +** to promote this lock to EXCLUSIVE so that it can spill cache +** pages into the database file without harm to concurrent +** readers. If it is unable to promote the lock, then the in-memory +** cache will be left in an inconsistent state and so the error +** code is promoted from the relatively benign [SQLITE_BUSY] to +** the more severe [SQLITE_IOERR_BLOCKED]. This error code promotion +** forces an automatic rollback of the changes. See the +** +** CorruptionFollowingBusyError wiki page for a discussion of why +** this is important. +** +** Sqlite is re-entrant, so the busy handler may start a new query. +** (It is not clear why anyone would every want to do this, but it +** is allowed, in theory.) But the busy handler may not close the +** database. Closing the database from a busy handler will delete +** data structures out from under the executing query and will +** probably result in a segmentation fault or other runtime error. +** +** There can only be a single busy handler defined for each database +** connection. Setting a new busy handler clears any previous one. +** Note that calling [sqlite3_busy_timeout()] will also set or clear +** the busy handler. 
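+**
+** (Editor's illustrative sketch, not part of the upstream header: a handler
+** that allows up to ten retries, installed with
+** sqlite3_busy_handler(db, xBusy, 0) on a hypothetical handle "db".)
+**
+**        static int xBusy(void *pArg, int nPrior){
+**          return nPrior<10;   /* non-zero: retry; 0: give up with SQLITE_BUSY */
+**        }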
+** +** When operating in [sqlite3_enable_shared_cache | shared cache mode], +** only a single busy handler can be defined for each database file. +** So if two database connections share a single cache, then changing +** the busy handler on one connection will also change the busy +** handler in the other connection. The busy handler is invoked +** in the thread that was running when the SQLITE_BUSY was hit. +*/ +int sqlite3_busy_handler(sqlite3*, int(*)(void*,int), void*); + +/* +** CAPI3REF: Set A Busy Timeout +** +** This routine sets a busy handler that sleeps for a while when a +** table is locked. The handler will sleep multiple times until +** at least "ms" milliseconds of sleeping have been done. After +** "ms" milliseconds of sleeping, the handler returns 0 which +** causes [sqlite3_step()] to return [SQLITE_BUSY] or [SQLITE_IOERR_BLOCKED]. +** +** Calling this routine with an argument less than or equal to zero +** turns off all busy handlers. +** +** There can only be a single busy handler for a particular database +** connection. If another busy handler was defined +** (using [sqlite3_busy_handler()]) prior to calling +** this routine, that other busy handler is cleared. +*/ +int sqlite3_busy_timeout(sqlite3*, int ms); + +/* +** CAPI3REF: Convenience Routines For Running Queries +** +** This next routine is a convenience wrapper around [sqlite3_exec()]. +** Instead of invoking a user-supplied callback for each row of the +** result, this routine remembers each row of the result in memory +** obtained from [sqlite3_malloc()], then returns all of the result after the +** query has finished. +** +** As an example, suppose the query result were this table: +** +**
+**        Name        | Age
+**        -----------------------
+**        Alice       | 43
+**        Bob         | 28
+**        Cindy       | 21
+** 
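+**
+** For illustration only (this call is a sketch, and the table name
+** "people" and the variable names below are hypothetical, not taken from
+** the original text), such a result might be produced by:
+**
+**  char **azResult;
+**  int nrow, ncolumn;
+**  char *zErrMsg;
+**  int rc = sqlite3_get_table(db, "SELECT Name, Age FROM people",
+**                             &azResult, &nrow, &ncolumn, &zErrMsg);
+**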
+** +** If the 3rd argument were &azResult then after the function returns +** azResult will contain the following data: +** +**
+**        azResult[0] = "Name";
+**        azResult[1] = "Age";
+**        azResult[2] = "Alice";
+**        azResult[3] = "43";
+**        azResult[4] = "Bob";
+**        azResult[5] = "28";
+**        azResult[6] = "Cindy";
+**        azResult[7] = "21";
+** 
+** +** Notice that there is an extra row of data containing the column +** headers. But the *nrow return value is still 3. *ncolumn is +** set to 2. In general, the number of values inserted into azResult +** will be ((*nrow) + 1)*(*ncolumn). +** +** After the calling function has finished using the result, it should +** pass the result data pointer to sqlite3_free_table() in order to +** release the memory that was malloc-ed. Because of the way the +** [sqlite3_malloc()] happens, the calling function must not try to call +** [sqlite3_free()] directly. Only [sqlite3_free_table()] is able to release +** the memory properly and safely. +** +** The return value of this routine is the same as from [sqlite3_exec()]. +*/ +int sqlite3_get_table( + sqlite3*, /* An open database */ + const char *sql, /* SQL to be executed */ + char ***resultp, /* Result written to a char *[] that this points to */ + int *nrow, /* Number of result rows written here */ + int *ncolumn, /* Number of result columns written here */ + char **errmsg /* Error msg written here */ +); +void sqlite3_free_table(char **result); + +/* +** CAPI3REF: Formatted String Printing Functions +** +** These routines are workalikes of the "printf()" family of functions +** from the standard C library. +** +** The sqlite3_mprintf() and sqlite3_vmprintf() routines write their +** results into memory obtained from [sqlite3_malloc()]. +** The strings returned by these two routines should be +** released by [sqlite3_free()]. Both routines return a +** NULL pointer if [sqlite3_malloc()] is unable to allocate enough +** memory to hold the resulting string. +** +** The sqlite3_snprintf() routine is similar to "snprintf()" from +** the standard C library. The result is written into the +** buffer supplied as the second parameter whose size is given by +** the first parameter. Note that the order of the +** first two parameters is reversed from snprintf(). This is an +** historical accident that cannot be fixed without breaking +** backwards compatibility. Note also that sqlite3_snprintf() +** returns a pointer to its buffer instead of the number of +** characters actually written into the buffer. We admit that +** the number of characters written would be a more useful return +** value but we cannot change the implementation of sqlite3_snprintf() +** now without breaking compatibility. +** +** As long as the buffer size is greater than zero, sqlite3_snprintf() +** guarantees that the buffer is always zero-terminated. The first +** parameter "n" is the total size of the buffer, including space for +** the zero terminator. So the longest string that can be completely +** written will be n-1 characters. +** +** These routines all implement some additional formatting +** options that are useful for constructing SQL statements. +** All of the usual printf formatting options apply. In addition, there +** are "%q", "%Q", and "%z" options. +** +** The %q option works like %s in that it substitutes a null-terminated +** string from the argument list. But %q also doubles every '\'' character. +** %q is designed for use inside a string literal. By doubling each '\'' +** character it escapes that character and allows it to be inserted into +** the string. +** +** For example, suppose some string variable contains text as follows: +** +**
+**  char *zText = "It's a happy day!";
+** 
+** +** One can use this text in an SQL statement as follows: +** +**
+**  char *zSQL = sqlite3_mprintf("INSERT INTO table1 VALUES('%q')", zText);
+**  sqlite3_exec(db, zSQL, 0, 0, 0);
+**  sqlite3_free(zSQL);
+** 
+** +** Because the %q format string is used, the '\'' character in zText +** is escaped and the SQL generated is as follows: +** +**
+**  INSERT INTO table1 VALUES('It''s a happy day!')
+** 
+** +** This is correct. Had we used %s instead of %q, the generated SQL +** would have looked like this: +** +**
+**  INSERT INTO table1 VALUES('It's a happy day!');
+** 
+** +** This second example is an SQL syntax error. As a general rule you +** should always use %q instead of %s when inserting text into a string +** literal. +** +** The %Q option works like %q except it also adds single quotes around +** the outside of the total string. Or if the parameter in the argument +** list is a NULL pointer, %Q substitutes the text "NULL" (without single +** quotes) in place of the %Q option. So, for example, one could say: +** +**
+**  char *zSQL = sqlite3_mprintf("INSERT INTO table1 VALUES(%Q)", zText);
+**  sqlite3_exec(db, zSQL, 0, 0, 0);
+**  sqlite3_free(zSQL);
+** 
+** +** The code above will render a correct SQL statement in the zSQL +** variable even if the zText variable is a NULL pointer. +** +** The "%z" formatting option works exactly like "%s" with the +** addition that after the string has been read and copied into +** the result, [sqlite3_free()] is called on the input string. +*/ +char *sqlite3_mprintf(const char*,...); +char *sqlite3_vmprintf(const char*, va_list); +char *sqlite3_snprintf(int,char*,const char*, ...); + +/* +** CAPI3REF: Memory Allocation Subsystem +** +** The SQLite core uses these three routines for all of its own +** internal memory allocation needs. (See the exception below.) +** The default implementation +** of the memory allocation subsystem uses the malloc(), realloc() +** and free() provided by the standard C library. However, if +** SQLite is compiled with the following C preprocessor macro +** +**
SQLITE_OMIT_MEMORY_ALLOCATION
+** +** then no implementation is provided for these routines by +** SQLite. The application that links against SQLite is +** expected to provide its own implementation. If the application +** does provide its own implementation for these routines, then +** it must also provide implementations for +** [sqlite3_memory_alarm()], [sqlite3_memory_used()], and +** [sqlite3_memory_highwater()]. The alternative implementations +** for these last three routines need not actually work, but +** stub functions at least are needed to satisfy the linker. +** SQLite never calls [sqlite3_memory_highwater()] itself, but +** the symbol is included in a table as part of the +** [sqlite3_load_extension()] interface. The +** [sqlite3_memory_alarm()] and [sqlite3_memory_used()] interfaces +** are called by [sqlite3_soft_heap_limit()] and working implementations +** of both routines must be provided if [sqlite3_soft_heap_limit()] +** is to operate correctly. +** +** Exception: The Windows OS interface layer calls +** the system malloc() and free() directly when converting +** filenames between the UTF-8 encoding used by SQLite +** and whatever filename encoding is used by the particular Windows +** installation. Memory allocation errors are detected, but +** they are reported back as [SQLITE_CANTOPEN] or +** [SQLITE_IOERR] rather than [SQLITE_NOMEM]. +*/ +void *sqlite3_malloc(int); +void *sqlite3_realloc(void*, int); +void sqlite3_free(void*); + +/* +** CAPI3REF: Memory Allocator Statistics +** +** In addition to the basic three allocation routines +** [sqlite3_malloc()], [sqlite3_free()], and [sqlite3_realloc()], +** the memory allocation subsystem included with the SQLite +** sources provides the interfaces shown below. +** +** The first of these two routines returns the amount of memory +** currently outstanding (malloced but not freed). The second +** returns the largest instantaneous amount of outstanding +** memory. The highwater mark is reset if the argument is +** true. +** +** The implementation of these routines in the SQLite core +** is omitted if the application is compiled with the +** SQLITE_OMIT_MEMORY_ALLOCATION macro defined. In that case, +** the application that links SQLite must provide its own +** alternative implementation. See the documentation on +** [sqlite3_malloc()] for additional information. +*/ +sqlite3_int64 sqlite3_memory_used(void); +sqlite3_int64 sqlite3_memory_highwater(int resetFlag); + +/* +** CAPI3REF: Memory Allocation Alarms +** +** The [sqlite3_memory_alarm] routine is used to register +** a callback on memory allocation events. +** +** This routine registers or clears a callback that fires when +** the amount of memory allocated exceeds iThreshold. Only +** a single callback can be registered at a time. Each call +** to [sqlite3_memory_alarm()] overwrites the previous callback. +** The callback is disabled by setting xCallback to a NULL +** pointer. +** +** The parameters to the callback are the pArg value, the +** amount of memory currently in use, and the size of the +** allocation that provoked the callback. The callback will +** presumably invoke [sqlite3_free()] to free up memory space. +** The callback may invoke [sqlite3_malloc()] or [sqlite3_realloc()] +** but if it does, no additional callbacks will be invoked by +** the recursive calls. +** +** The [sqlite3_soft_heap_limit()] interface works by registering +** a memory alarm at the soft heap limit and invoking +** [sqlite3_release_memory()] in the alarm callback.
Application +** programs should not attempt to use the [sqlite3_memory_alarm()] +** interface because doing so will interfere with the +** [sqlite3_soft_heap_limit()] module. This interface is exposed +** only so that applications can provide their own +** alternative implementation when the SQLite core is +** compiled with SQLITE_OMIT_MEMORY_ALLOCATION. +*/ +int sqlite3_memory_alarm( + void(*xCallback)(void *pArg, sqlite3_int64 used, int N), + void *pArg, + sqlite3_int64 iThreshold +); + + +/* +** CAPI3REF: Compile-Time Authorization Callbacks +** +** This routine registers an authorizer callback with the SQLite library. +** The authorizer callback is invoked as SQL statements are being compiled +** by [sqlite3_prepare()] or its variants [sqlite3_prepare_v2()], +** [sqlite3_prepare16()] and [sqlite3_prepare16_v2()]. At various +** points during the compilation process, as logic is being created +** to perform various actions, the authorizer callback is invoked to +** see if those actions are allowed. The authorizer callback should +** return SQLITE_OK to allow the action, [SQLITE_IGNORE] to disallow the +** specific action but allow the SQL statement to continue to be +** compiled, or [SQLITE_DENY] to cause the entire SQL statement to be +** rejected with an error. +** +** Depending on the action, the [SQLITE_IGNORE] and [SQLITE_DENY] return +** codes might mean something different or they might mean the same +** thing. If the action is, for example, to perform a delete operation, +** then [SQLITE_IGNORE] and [SQLITE_DENY] both cause the statement compilation +** to fail with an error. But if the action is to read a specific column +** from a specific table, then [SQLITE_DENY] will cause the entire +** statement to fail but [SQLITE_IGNORE] will cause a NULL value to be +** read instead of the actual column value. +** +** The first parameter to the authorizer callback is a copy of +** the third parameter to the sqlite3_set_authorizer() interface. +** The second parameter to the callback is an integer +** [SQLITE_COPY | action code] that specifies the particular action +** to be authorized. The available action codes are +** [SQLITE_COPY | documented separately]. The third through sixth +** parameters to the callback are strings that contain additional +** details about the action to be authorized. +** +** An authorizer is used when preparing SQL statements from an untrusted +** source, to ensure that the SQL statements do not try to access data +** that they are not allowed to see, or that they do not try to +** execute malicious statements that damage the database. For +** example, an application may allow a user to enter arbitrary +** SQL queries for evaluation by a database. But the application does +** not want the user to be able to make arbitrary changes to the +** database. An authorizer could then be put in place while the +** user-entered SQL is being prepared that disallows everything +** except SELECT statements. +** +** Only a single authorizer can be in place on a database connection +** at a time. Each call to sqlite3_set_authorizer() overrides the +** previous call. A NULL authorizer means that no authorization +** callback is invoked. The default authorizer is NULL. +** +** Note that the authorizer callback is invoked only during +** [sqlite3_prepare()] or its variants. Authorization is not +** performed during statement evaluation in [sqlite3_step()].
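+**
+** As a hedged sketch only (the callback name denyWrites is hypothetical,
+** not part of this interface), an authorizer that permits nothing but
+** reading and SELECT statements could be written as:
+**
+**  static int denyWrites(void *pUserData, int code, const char *z1,
+**                        const char *z2, const char *zDb, const char *zTrig){
+**    if( code==SQLITE_SELECT || code==SQLITE_READ ) return SQLITE_OK;
+**    return SQLITE_DENY;           /* refuse every other action */
+**  }
+**  ...
+**  sqlite3_set_authorizer(db, denyWrites, 0);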
+*/ +int sqlite3_set_authorizer( + sqlite3*, + int (*xAuth)(void*,int,const char*,const char*,const char*,const char*), + void *pUserData +); + +/* +** CAPI3REF: Authorizer Return Codes +** +** The [sqlite3_set_authorizer | authorizer callback function] must +** return either [SQLITE_OK] or one of these two constants in order +** to signal SQLite whether or not the action is permitted. See the +** [sqlite3_set_authorizer | authorizer documentation] for additional +** information. +*/ +#define SQLITE_DENY 1 /* Abort the SQL statement with an error */ +#define SQLITE_IGNORE 2 /* Don't allow access, but don't generate an error */ + +/* +** CAPI3REF: Authorizer Action Codes +** +** The [sqlite3_set_authorizer()] interface registers a callback function +** that is invoked to authorize certain SQL statement actions. The +** second parameter to the callback is an integer code that specifies +** what action is being authorized. These are the integer action codes that +** the authorizer callback may be passed. +** +** These action code values signify what kind of operation is to be +** authorized. The 3rd and 4th parameters to the authorization callback +** function will be parameters or NULL depending on which of these +** codes is used as the second parameter. The 5th parameter to the +** authorizer callback is the name of the database ("main", "temp", +** etc.) if applicable. The 6th parameter to the authorizer callback +** is the name of the inner-most trigger or view that is responsible for +** the access attempt or NULL if this access attempt is directly from +** top-level SQL code. +*/ +/******************************************* 3rd ************ 4th ***********/ +#define SQLITE_CREATE_INDEX 1 /* Index Name Table Name */ +#define SQLITE_CREATE_TABLE 2 /* Table Name NULL */ +#define SQLITE_CREATE_TEMP_INDEX 3 /* Index Name Table Name */ +#define SQLITE_CREATE_TEMP_TABLE 4 /* Table Name NULL */ +#define SQLITE_CREATE_TEMP_TRIGGER 5 /* Trigger Name Table Name */ +#define SQLITE_CREATE_TEMP_VIEW 6 /* View Name NULL */ +#define SQLITE_CREATE_TRIGGER 7 /* Trigger Name Table Name */ +#define SQLITE_CREATE_VIEW 8 /* View Name NULL */ +#define SQLITE_DELETE 9 /* Table Name NULL */ +#define SQLITE_DROP_INDEX 10 /* Index Name Table Name */ +#define SQLITE_DROP_TABLE 11 /* Table Name NULL */ +#define SQLITE_DROP_TEMP_INDEX 12 /* Index Name Table Name */ +#define SQLITE_DROP_TEMP_TABLE 13 /* Table Name NULL */ +#define SQLITE_DROP_TEMP_TRIGGER 14 /* Trigger Name Table Name */ +#define SQLITE_DROP_TEMP_VIEW 15 /* View Name NULL */ +#define SQLITE_DROP_TRIGGER 16 /* Trigger Name Table Name */ +#define SQLITE_DROP_VIEW 17 /* View Name NULL */ +#define SQLITE_INSERT 18 /* Table Name NULL */ +#define SQLITE_PRAGMA 19 /* Pragma Name 1st arg or NULL */ +#define SQLITE_READ 20 /* Table Name Column Name */ +#define SQLITE_SELECT 21 /* NULL NULL */ +#define SQLITE_TRANSACTION 22 /* NULL NULL */ +#define SQLITE_UPDATE 23 /* Table Name Column Name */ +#define SQLITE_ATTACH 24 /* Filename NULL */ +#define SQLITE_DETACH 25 /* Database Name NULL */ +#define SQLITE_ALTER_TABLE 26 /* Database Name Table Name */ +#define SQLITE_REINDEX 27 /* Index Name NULL */ +#define SQLITE_ANALYZE 28 /* Table Name NULL */ +#define SQLITE_CREATE_VTABLE 29 /* Table Name Module Name */ +#define SQLITE_DROP_VTABLE 30 /* Table Name Module Name */ +#define SQLITE_FUNCTION 31 /* Function Name NULL */ +#define SQLITE_COPY 0 /* No longer used */ + +/* +** CAPI3REF: Tracing And Profiling Functions +** +** These routines register callback
functions that can be used for +** tracing and profiling the execution of SQL statements. +** The callback function registered by sqlite3_trace() is invoked +** at the first [sqlite3_step()] for the evaluation of an SQL statement. +** The callback function registered by sqlite3_profile() is invoked +** as each SQL statement finishes and includes +** information on how long that statement ran. +** +** The sqlite3_profile() API is currently considered experimental and +** is subject to change. +*/ +void *sqlite3_trace(sqlite3*, void(*xTrace)(void*,const char*), void*); +void *sqlite3_profile(sqlite3*, + void(*xProfile)(void*,const char*,sqlite3_uint64), void*); + +/* +** CAPI3REF: Query Progress Callbacks +** +** This routine configures a callback function - the progress callback - that +** is invoked periodically during long running calls to [sqlite3_exec()], +** [sqlite3_step()] and [sqlite3_get_table()]. An example use for this +** interface is to keep a GUI updated during a large query. +** +** The progress callback is invoked once for every N virtual machine opcodes, +** where N is the second argument to this function. The progress callback +** itself is identified by the third argument to this function. The fourth +** argument to this function is a void pointer passed to the progress callback +** function each time it is invoked. +** +** If a call to [sqlite3_exec()], [sqlite3_step()], or [sqlite3_get_table()] +** results in fewer than N opcodes being executed, then the progress +** callback is never invoked. +** +** Only a single progress callback function may be registered for each +** open database connection. Every call to sqlite3_progress_handler() +** overwrites the results of the previous call. +** To remove the progress callback altogether, pass NULL as the third +** argument to this function. +** +** If the progress callback returns a result other than 0, then the current +** query is immediately terminated and any database changes rolled back. +** The containing [sqlite3_exec()], [sqlite3_step()], or +** [sqlite3_get_table()] call returns SQLITE_INTERRUPT. This feature +** can be used, for example, to implement the "Cancel" button on a +** progress dialog box in a GUI. +*/ +void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); + +/* +** CAPI3REF: Opening A New Database Connection +** +** Open the sqlite database file "filename". The "filename" is UTF-8 +** encoded for [sqlite3_open()] and [sqlite3_open_v2()] and UTF-16 encoded +** in the native byte order for [sqlite3_open16()]. +** An [sqlite3*] handle is returned in *ppDb, even +** if an error occurs. If the database is opened (or created) successfully, +** then [SQLITE_OK] is returned. Otherwise an error code is returned. The +** [sqlite3_errmsg()] or [sqlite3_errmsg16()] routines can be used to obtain +** an English language description of the error. +** +** The default encoding for the database will be UTF-8 if +** [sqlite3_open()] or [sqlite3_open_v2()] is called and +** UTF-16 if [sqlite3_open16()] is used. +** +** Whether or not an error occurs when it is opened, resources associated +** with the [sqlite3*] handle should be released by passing it to +** [sqlite3_close()] when it is no longer required. +** +** The [sqlite3_open_v2()] interface works like [sqlite3_open()] except that +** it provides two additional parameters for additional control over the +** new database connection. The flags parameter can be one of: +** +**
    +**
  1. [SQLITE_OPEN_READONLY] +**
  2. [SQLITE_OPEN_READWRITE] +**
  3. [SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE] +**
+** +** The first value opens the database read-only. If the database does +** not previously exist, an error is returned. The second option opens +** the database for reading and writing if possible, or reading only if +** the file is write protected. In either case the database must already +** exist or an error is returned. The third option opens the database +** for reading and writing and creates it if it does not already exist. +** The third option is the behavior that is always used for [sqlite3_open()] +** and [sqlite3_open16()]. +** +** If the filename is ":memory:", then a private +** in-memory database is created for the connection. This in-memory +** database will vanish when the database connection is closed. Future +** versions of SQLite might make use of additional special filenames +** that begin with the ":" character. It is recommended that +** when a database filename really does begin with +** ":" you prefix the filename with a pathname like "./" to +** avoid ambiguity. +** +** If the filename is an empty string, then a private temporary +** on-disk database will be created. This private database will be +** automatically deleted as soon as the database connection is closed. +** +** The fourth parameter to sqlite3_open_v2() is the name of the +** [sqlite3_vfs] object that defines the operating system +** interface that the new database connection should use. If the +** fourth parameter is a NULL pointer then the default [sqlite3_vfs] +** object is used. +** +** Note to Windows users: The encoding used for the filename argument +** of [sqlite3_open()] and [sqlite3_open_v2()] must be UTF-8, not whatever +** codepage is currently defined. Filenames containing international +** characters must be converted to UTF-8 prior to passing them into +** [sqlite3_open()] or [sqlite3_open_v2()]. +*/ +int sqlite3_open( + const char *filename, /* Database filename (UTF-8) */ + sqlite3 **ppDb /* OUT: SQLite db handle */ +); +int sqlite3_open16( + const void *filename, /* Database filename (UTF-16) */ + sqlite3 **ppDb /* OUT: SQLite db handle */ +); +int sqlite3_open_v2( + const char *filename, /* Database filename (UTF-8) */ + sqlite3 **ppDb, /* OUT: SQLite db handle */ + int flags, /* Flags */ + const char *zVfs /* Name of VFS module to use */ +); + +/* +** CAPI3REF: Error Codes And Messages +** +** The sqlite3_errcode() interface returns the numeric +** [SQLITE_OK | result code] or [SQLITE_IOERR_READ | extended result code] +** for the most recent failed sqlite3_* API call associated +** with [sqlite3] handle 'db'. If a prior API call failed but the +** most recent API call succeeded, the return value from sqlite3_errcode() +** is undefined. +** +** The sqlite3_errmsg() and sqlite3_errmsg16() routines return English-language +** text that describes the error, as either UTF8 or UTF16 respectively. +** Memory to hold the error message string is managed internally. The +** string may be overwritten or deallocated by subsequent calls to SQLite +** interface functions. +** +** Calls to many sqlite3_* functions set the error code and string returned +** by [sqlite3_errcode()], [sqlite3_errmsg()], and [sqlite3_errmsg16()] +** (overwriting the previous values). Note that calls to [sqlite3_errcode()], +** [sqlite3_errmsg()], and [sqlite3_errmsg16()] themselves do not affect the +** results of future invocations. Calls to API routines that do not return +** an error code (example: [sqlite3_data_count()]) do not +** change the error code returned by this routine.
Interfaces that are +** not associated with a specific database connection (examples: +** [sqlite3_mprintf()] or [sqlite3_enable_shared_cache()]) do not change +** the return code. +** +** Assuming no other intervening sqlite3_* API calls are made, the error +** code returned by this function is associated with the same error as +** the strings returned by [sqlite3_errmsg()] and [sqlite3_errmsg16()]. +*/ +int sqlite3_errcode(sqlite3 *db); +const char *sqlite3_errmsg(sqlite3*); +const void *sqlite3_errmsg16(sqlite3*); + +/* +** CAPI3REF: SQL Statement Object +** +** Instances of this object represent single SQL statements. Such an object +** is variously known as a "prepared statement" or a +** "compiled SQL statement" or simply as a "statement". +** +** The life of a statement object goes something like this: +** +**
    +**
  1. Create the object using [sqlite3_prepare_v2()] or a related +** function. +**
  2. Bind values to host parameters using +** [sqlite3_bind_blob | sqlite3_bind_* interfaces]. +**
  3. Run the SQL by calling [sqlite3_step()] one or more times. +**
  4. Reset the statement using [sqlite3_reset()] then go back +** to step 2. Do this zero or more times. +**
  5. Destroy the object using [sqlite3_finalize()]. +**
+** +** Refer to documentation on individual methods above for additional +** information. +*/ +typedef struct sqlite3_stmt sqlite3_stmt; + +/* +** CAPI3REF: Compiling An SQL Statement +** +** To execute an SQL query, it must first be compiled into a byte-code +** program using one of these routines. +** +** The first argument "db" is an [sqlite3 | SQLite database handle] +** obtained from a prior call to [sqlite3_open()], [sqlite3_open_v2()] +** or [sqlite3_open16()]. +** The second argument "zSql" is the statement to be compiled, encoded +** as either UTF-8 or UTF-16. The sqlite3_prepare() and sqlite3_prepare_v2() +** interfaces use UTF-8 and sqlite3_prepare16() and sqlite3_prepare16_v2() +** use UTF-16. +** +** If the nByte argument is less +** than zero, then zSql is read up to the first zero terminator. If +** nByte is non-negative, then it is the maximum number of +** bytes read from zSql. When nByte is non-negative, the +** zSql string ends at either the first '\000' character or +** the nByte-th byte, whichever comes first. +** +** *pzTail is made to point to the first byte past the end of the first +** SQL statement in zSql. This routine only compiles the first statement +** in zSql, so *pzTail is left pointing to what remains uncompiled. +** +** *ppStmt is left pointing to a compiled +** [sqlite3_stmt | SQL statement structure] that can be +** executed using [sqlite3_step()]. Or if there is an error, *ppStmt may be +** set to NULL. If the input text contained no SQL (if the input is an +** empty string or a comment) then *ppStmt is set to NULL. The calling +** procedure is responsible for deleting the compiled SQL statement +** using [sqlite3_finalize()] after it has finished with it. +** +** On success, [SQLITE_OK] is returned. Otherwise an +** [SQLITE_ERROR | error code] is returned. +** +** The sqlite3_prepare_v2() and sqlite3_prepare16_v2() interfaces are +** recommended for all new programs. The two older interfaces are retained +** for backwards compatibility, but their use is discouraged. +** In the "v2" interfaces, the prepared statement +** that is returned (the [sqlite3_stmt] object) contains a copy of the +** original SQL text. This causes the [sqlite3_step()] interface to +** behave differently in two ways: +** +**
    +**
  1. +** If the database schema changes, instead of returning [SQLITE_SCHEMA] as it +** always used to do, [sqlite3_step()] will automatically recompile the SQL +** statement and try to run it again. If the schema has changed in a way +** that makes the statement no longer valid, [sqlite3_step()] will still +** return [SQLITE_SCHEMA]. But unlike the legacy behavior, [SQLITE_SCHEMA] is +** now a fatal error. Calling [sqlite3_prepare_v2()] again will not make the +** error go away. Note: use [sqlite3_errmsg()] to find the text of the parsing +** error that results in an [SQLITE_SCHEMA] return. +**
  2. +** When an error occurs, +** [sqlite3_step()] will return one of the detailed +** [SQLITE_ERROR | result codes] or +** [SQLITE_IOERR_READ | extended result codes] directly. +** The legacy behavior was that [sqlite3_step()] would only return a generic +** [SQLITE_ERROR] result code and you would have to make a second call to +** [sqlite3_reset()] in order to find the underlying cause of the problem. +** With the "v2" prepare interfaces, the underlying reason for the error is +** returned immediately. +**
+*/ +int sqlite3_prepare( + sqlite3 *db, /* Database handle */ + const char *zSql, /* SQL statement, UTF-8 encoded */ + int nByte, /* Maximum length of zSql in bytes. */ + sqlite3_stmt **ppStmt, /* OUT: Statement handle */ + const char **pzTail /* OUT: Pointer to unused portion of zSql */ +); +int sqlite3_prepare_v2( + sqlite3 *db, /* Database handle */ + const char *zSql, /* SQL statement, UTF-8 encoded */ + int nByte, /* Maximum length of zSql in bytes. */ + sqlite3_stmt **ppStmt, /* OUT: Statement handle */ + const char **pzTail /* OUT: Pointer to unused portion of zSql */ +); +int sqlite3_prepare16( + sqlite3 *db, /* Database handle */ + const void *zSql, /* SQL statement, UTF-16 encoded */ + int nByte, /* Maximum length of zSql in bytes. */ + sqlite3_stmt **ppStmt, /* OUT: Statement handle */ + const void **pzTail /* OUT: Pointer to unused portion of zSql */ +); +int sqlite3_prepare16_v2( + sqlite3 *db, /* Database handle */ + const void *zSql, /* SQL statement, UTF-16 encoded */ + int nByte, /* Maximum length of zSql in bytes. */ + sqlite3_stmt **ppStmt, /* OUT: Statement handle */ + const void **pzTail /* OUT: Pointer to unused portion of zSql */ +); + +/* +** CAPI3REF: Dynamically Typed Value Object +** +** SQLite uses dynamic typing for the values it stores. Values can +** be integers, floating point values, strings, BLOBs, or NULL. When +** passing around values internally, each value is represented as +** an instance of the sqlite3_value object. +*/ +typedef struct Mem sqlite3_value; + +/* +** CAPI3REF: SQL Function Context Object +** +** The context in which an SQL function executes is stored in an +** sqlite3_context object. A pointer to such an object is the +** first parameter to user-defined SQL functions. +*/ +typedef struct sqlite3_context sqlite3_context; + +/* +** CAPI3REF: Binding Values To Prepared Statements +** +** In the SQL strings input to [sqlite3_prepare_v2()] and its variants, +** one or more literals can be replaced by a parameter in one of these +** forms: +** +**
    +**
  • ? +**
  • ?NNN +**
  • :AAA +**
  • @AAA +**
  • $VVV +**
+** +** In the parameter forms shown above NNN is an integer literal, +** AAA is an alphanumeric identifier and VVV is a variable name according +** to the syntax rules of the TCL programming language. +** The values of these parameters (also called "host parameter names") +** can be set using the sqlite3_bind_*() routines defined here. +** +** The first argument to the sqlite3_bind_*() routines always is a pointer +** to the [sqlite3_stmt] object returned from [sqlite3_prepare_v2()] or +** its variants. The second +** argument is the index of the parameter to be set. The first parameter has +** an index of 1. When the same named parameter is used more than once, second +** and subsequent +** occurrences have the same index as the first occurrence. The index for +** named parameters can be looked up using the +** [sqlite3_bind_parameter_name()] API if desired. The index for "?NNN" +** parameters is the value of NNN. +** The NNN value must be between 1 and the compile-time +** parameter SQLITE_MAX_VARIABLE_NUMBER (default value: 999). +** See limits.html for additional information. +** +** The third argument is the value to bind to the parameter. +** +** In those +** routines that have a fourth argument, its value is the number of bytes +** in the parameter. To be clear: the value is the number of bytes in the +** string, not the number of characters. The number +** of bytes does not include the zero-terminator at the end of strings. +** If the fourth parameter is negative, the length of the string is +** the number of bytes up to the first zero terminator. +** +** The fifth argument to sqlite3_bind_blob(), sqlite3_bind_text(), and +** sqlite3_bind_text16() is a destructor used to dispose of the BLOB or +** text after SQLite has finished with it. If the fifth argument is the +** special value [SQLITE_STATIC], then the library assumes that the information +** is in static, unmanaged space and does not need to be freed. If the +** fifth argument has the value [SQLITE_TRANSIENT], then SQLite makes its +** own private copy of the data immediately, before the sqlite3_bind_*() +** routine returns. +** +** The sqlite3_bind_zeroblob() routine binds a BLOB of length n that +** is filled with zeros. A zeroblob uses a fixed amount of memory +** (just an integer to hold its size) while it is being processed. +** Zeroblobs are intended to serve as place-holders for BLOBs whose +** content is later written using +** [sqlite3_blob_open | incremental BLOB I/O] routines. A negative +** value for the zeroblob results in a zero-length BLOB. +** +** The sqlite3_bind_*() routines must be called after +** [sqlite3_prepare_v2()] (and its variants) or [sqlite3_reset()] and +** before [sqlite3_step()]. +** Bindings are not cleared by the [sqlite3_reset()] routine. +** Unbound parameters are interpreted as NULL. +** +** These routines return [SQLITE_OK] on success or an error code if +** anything goes wrong. [SQLITE_RANGE] is returned if the parameter +** index is out of range. [SQLITE_NOMEM] is returned if malloc fails. +** [SQLITE_MISUSE] is returned if these routines are called on a virtual +** machine that is in the wrong state or which has already been finalized.
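+**
+** Tying the pieces together, a minimal illustrative sketch (not from the
+** original header; the table, column, and variable names are hypothetical)
+** that prepares a statement, binds one value, and walks the results:
+**
+**  sqlite3_stmt *pStmt;
+**  int rc = sqlite3_prepare_v2(db, "SELECT age FROM people WHERE name=?1",
+**                              -1, &pStmt, 0);
+**  if( rc==SQLITE_OK ){
+**    sqlite3_bind_text(pStmt, 1, "Alice", -1, SQLITE_STATIC);
+**    while( sqlite3_step(pStmt)==SQLITE_ROW ){
+**      int age = sqlite3_column_int(pStmt, 0);   /* read column 0 */
+**      /* ... use age ... */
+**    }
+**  }
+**  sqlite3_finalize(pStmt);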
+*/ +int sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*)); +int sqlite3_bind_double(sqlite3_stmt*, int, double); +int sqlite3_bind_int(sqlite3_stmt*, int, int); +int sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64); +int sqlite3_bind_null(sqlite3_stmt*, int); +int sqlite3_bind_text(sqlite3_stmt*, int, const char*, int n, void(*)(void*)); +int sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*)); +int sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*); +int sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n); + +/* +** CAPI3REF: Number Of Host Parameters +** +** Return the largest host parameter index in the precompiled statement given +** as the argument. When the host parameters are of forms like ":AAA" +** or "?", then they are assigned sequential increasing numbers beginning +** with one, so the value returned is the number of parameters. However +** if the same host parameter name is used multiple times, each occurrence +** is given the same number, so the value returned in that case is the number +** of unique host parameter names. If host parameters of the form "?NNN" +** are used (where NNN is an integer) then there might be gaps in the +** numbering and the value returned by this interface is the index of the +** host parameter with the largest index value. +** +** The prepared statement must not be [sqlite3_finalize | finalized] +** prior to this routine returning. Otherwise the results are undefined +** and probably undesirable. +*/ +int sqlite3_bind_parameter_count(sqlite3_stmt*); + +/* +** CAPI3REF: Name Of A Host Parameter +** +** This routine returns a pointer to the name of the n-th parameter in a +** [sqlite3_stmt | prepared statement]. +** Host parameters of the form ":AAA" or "@AAA" or "$VVV" have a name +** which is the string ":AAA" or "@AAA" or "$VVV". +** In other words, the initial ":" or "$" or "@" +** is included as part of the name. +** Parameters of the form "?" or "?NNN" have no name. +** +** The first bound parameter has an index of 1, not 0. +** +** If the value n is out of range or if the n-th parameter is nameless, +** then NULL is returned. The returned string is always in the +** UTF-8 encoding even if the named parameter was originally specified +** as UTF-16 in [sqlite3_prepare16()] or [sqlite3_prepare16_v2()]. +*/ +const char *sqlite3_bind_parameter_name(sqlite3_stmt*, int); + +/* +** CAPI3REF: Index Of A Parameter With A Given Name +** +** This routine returns the index of a host parameter with the given name. +** The name must match exactly. If no parameter with the given name is +** found, return 0. Parameter names must be UTF-8. +*/ +int sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName); + +/* +** CAPI3REF: Reset All Bindings On A Prepared Statement +** +** Contrary to the intuition of many, [sqlite3_reset()] does not +** reset the [sqlite3_bind_blob | bindings] on a +** [sqlite3_stmt | prepared statement]. Use this routine to +** reset all host parameters to NULL. +*/ +int sqlite3_clear_bindings(sqlite3_stmt*); + +/* +** CAPI3REF: Number Of Columns In A Result Set +** +** Return the number of columns in the result set returned by the +** [sqlite3_stmt | compiled SQL statement]. This routine returns 0 +** if pStmt is an SQL statement that does not return data (for +** example an UPDATE).
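+**
+** For illustration only (a sketch; the SQL text and names are hypothetical):
+** after preparing "SELECT name, age FROM people" this routine returns 2,
+** while after preparing an UPDATE statement it returns 0.
+**
+**  sqlite3_prepare_v2(db, "SELECT name, age FROM people", -1, &pStmt, 0);
+**  nCol = sqlite3_column_count(pStmt);     /* nCol is 2 */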
+*/ +int sqlite3_column_count(sqlite3_stmt *pStmt); + +/* +** CAPI3REF: Column Names In A Result Set +** +** These routines return the name assigned to a particular column +** in the result set of a SELECT statement. The sqlite3_column_name() +** interface returns a pointer to a UTF8 string and sqlite3_column_name16() +** returns a pointer to a UTF16 string. The first parameter is the +** [sqlite3_stmt | prepared statement] that implements the SELECT statement. +** The second parameter is the column number. The left-most column is +** number 0. +** +** The returned string pointer is valid until either the +** [sqlite3_stmt | prepared statement] is destroyed by [sqlite3_finalize()] +** or until the next call to sqlite3_column_name() or sqlite3_column_name16() +** on the same column. +** +** If sqlite3_malloc() fails during the processing of either routine +** (for example during a conversion from UTF-8 to UTF-16) then a +** NULL pointer is returned. +*/ +const char *sqlite3_column_name(sqlite3_stmt*, int N); +const void *sqlite3_column_name16(sqlite3_stmt*, int N); + +/* +** CAPI3REF: Source Of Data In A Query Result +** +** These routines provide a means to determine what column of what +** table in which database a result of a SELECT statement comes from. +** The name of the database or table or column can be returned as +** either a UTF8 or UTF16 string. The _database_ routines return +** the database name, the _table_ routines return the table name, and +** the _origin_ routines return the column name. +** The returned string is valid until +** the [sqlite3_stmt | prepared statement] is destroyed using +** [sqlite3_finalize()] or until the same information is requested +** again in a different encoding. +** +** The names returned are the original un-aliased names of the +** database, table, and column. +** +** The first argument to the following calls is a +** [sqlite3_stmt | compiled SQL statement]. +** These functions return information about the Nth column returned by +** the statement, where N is the second function argument. +** +** If the Nth column returned by the statement is an expression +** or subquery and is not a column value, then all of these functions +** return NULL. Otherwise, they return the +** name of the attached database, table and column that query result +** column was extracted from. +** +** As with all other SQLite APIs, those postfixed with "16" return UTF-16 +** encoded strings, the other functions return UTF-8. +** +** These APIs are only available if the library was compiled with the +** SQLITE_ENABLE_COLUMN_METADATA preprocessor symbol defined. +** +** If two or more threads call one or more of these routines against the same +** prepared statement and column at the same time then the results are +** undefined. +*/ +const char *sqlite3_column_database_name(sqlite3_stmt*,int); +const void *sqlite3_column_database_name16(sqlite3_stmt*,int); +const char *sqlite3_column_table_name(sqlite3_stmt*,int); +const void *sqlite3_column_table_name16(sqlite3_stmt*,int); +const char *sqlite3_column_origin_name(sqlite3_stmt*,int); +const void *sqlite3_column_origin_name16(sqlite3_stmt*,int); + +/* +** CAPI3REF: Declared Datatype Of A Query Result +** +** The first parameter is a [sqlite3_stmt | compiled SQL statement]. +** If this statement is a SELECT statement and the Nth column of the +** returned result set of that SELECT is a table column (not an +** expression or subquery) then the declared type of the table +** column is returned.
If the Nth column of the result set is an +** expression or subquery, then a NULL pointer is returned. +** The returned string is always UTF-8 encoded. For example, in +** the database schema: +** +** CREATE TABLE t1(c1 VARIANT); +** +** And the following statement is compiled: +** +** SELECT c1 + 1, c1 FROM t1; +** +** Then this routine would return the string "VARIANT" for the second +** result column (i==1), and a NULL pointer for the first result column +** (i==0). +** +** SQLite uses dynamic run-time typing. So just because a column +** is declared to contain a particular type does not mean that the +** data stored in that column is of the declared type. SQLite is +** strongly typed, but the typing is dynamic not static. Type +** is associated with individual values, not with the containers +** used to hold those values. +*/ +const char *sqlite3_column_decltype(sqlite3_stmt *, int i); +const void *sqlite3_column_decltype16(sqlite3_stmt*,int); + +/* +** CAPI3REF: Evaluate An SQL Statement +** +** After an [sqlite3_stmt | SQL statement] has been prepared with a call +** to either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()] or to one of +** the legacy interfaces [sqlite3_prepare()] or [sqlite3_prepare16()], +** then this function must be called one or more times to evaluate the +** statement. +** +** The details of the behavior of this sqlite3_step() interface depend +** on whether the statement was prepared using the newer "v2" interface +** [sqlite3_prepare_v2()] and [sqlite3_prepare16_v2()] or the older legacy +** interface [sqlite3_prepare()] and [sqlite3_prepare16()]. The use of the +** new "v2" interface is recommended for new applications but the legacy +** interface will continue to be supported. +** +** In the legacy interface, the return value will be either [SQLITE_BUSY], +** [SQLITE_DONE], [SQLITE_ROW], [SQLITE_ERROR], or [SQLITE_MISUSE]. +** With the "v2" interface, any of the other [SQLITE_OK | result code] +** or [SQLITE_IOERR_READ | extended result code] might be returned as +** well. +** +** [SQLITE_BUSY] means that the database engine was unable to acquire the +** database locks it needs to do its job. If the statement is a COMMIT +** or occurs outside of an explicit transaction, then you can retry the +** statement. If the statement is not a COMMIT and occurs within an +** explicit transaction then you should roll back the transaction before +** continuing. +** +** [SQLITE_DONE] means that the statement has finished executing +** successfully. sqlite3_step() should not be called again on this virtual +** machine without first calling [sqlite3_reset()] to reset the virtual +** machine back to its initial state. +** +** If the SQL statement being executed returns any data, then +** [SQLITE_ROW] is returned each time a new row of data is ready +** for processing by the caller. The values may be accessed using +** the [sqlite3_column_int | column access functions]. +** sqlite3_step() is called again to retrieve the next row of data. +** +** [SQLITE_ERROR] means that a run-time error (such as a constraint +** violation) has occurred. sqlite3_step() should not be called again on +** the VM. More information may be found by calling [sqlite3_errmsg()]. +** With the legacy interface, a more specific error code (example: +** [SQLITE_INTERRUPT], [SQLITE_SCHEMA], [SQLITE_CORRUPT], and so forth) +** can be obtained by calling [sqlite3_reset()] on the +** [sqlite3_stmt | prepared statement].
In the "v2" interface, +** the more specific error code is returned directly by sqlite3_step(). +** +** [SQLITE_MISUSE] means that the this routine was called inappropriately. +** Perhaps it was called on a [sqlite3_stmt | prepared statement] that has +** already been [sqlite3_finalize | finalized] or on one that had +** previously returned [SQLITE_ERROR] or [SQLITE_DONE]. Or it could +** be the case that the same database connection is being used by two or +** more threads at the same moment in time. +** +** Goofy Interface Alert: +** In the legacy interface, +** the sqlite3_step() API always returns a generic error code, +** [SQLITE_ERROR], following any error other than [SQLITE_BUSY] +** and [SQLITE_MISUSE]. You must call [sqlite3_reset()] or +** [sqlite3_finalize()] in order to find one of the specific +** [SQLITE_ERROR | result codes] that better describes the error. +** We admit that this is a goofy design. The problem has been fixed +** with the "v2" interface. If you prepare all of your SQL statements +** using either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()] instead +** of the legacy [sqlite3_prepare()] and [sqlite3_prepare16()], then the +** more specific [SQLITE_ERROR | result codes] are returned directly +** by sqlite3_step(). The use of the "v2" interface is recommended. +*/ +int sqlite3_step(sqlite3_stmt*); + +/* +** CAPI3REF: +** +** Return the number of values in the current row of the result set. +** +** After a call to [sqlite3_step()] that returns [SQLITE_ROW], this routine +** will return the same value as the [sqlite3_column_count()] function. +** After [sqlite3_step()] has returned an [SQLITE_DONE], [SQLITE_BUSY], or +** a [SQLITE_ERROR | error code], or before [sqlite3_step()] has been +** called on the [sqlite3_stmt | prepared statement] for the first time, +** this routine returns zero. +*/ +int sqlite3_data_count(sqlite3_stmt *pStmt); + +/* +** CAPI3REF: Fundamental Datatypes +** +** Every value in SQLite has one of five fundamental datatypes: +** +**
    +**
  • 64-bit signed integer +**
  • 64-bit IEEE floating point number +**
  • string +**
  • BLOB +**
  • NULL +**
+** +** These constants are codes for each of those types. +** +** Note that the SQLITE_TEXT constant was also used in SQLite version 2 +** for a completely different meaning. Software that links against both +** SQLite version 2 and SQLite version 3 should use SQLITE3_TEXT not +** SQLITE_TEXT. +*/ +#define SQLITE_INTEGER 1 +#define SQLITE_FLOAT 2 +#define SQLITE_BLOB 4 +#define SQLITE_NULL 5 +#ifdef SQLITE_TEXT +# undef SQLITE_TEXT +#else +# define SQLITE_TEXT 3 +#endif +#define SQLITE3_TEXT 3 + +/* +** CAPI3REF: Results Values From A Query +** +** These routines return information about +** a single column of the current result row of a query. In every +** case the first argument is a pointer to the +** [sqlite3_stmt | SQL statement] that is being +** evaluated (the [sqlite3_stmt*] that was returned from +** [sqlite3_prepare_v2()] or one of its variants) and +** the second argument is the index of the column for which information +** should be returned. The left-most column of the result set +** has an index of 0. +** +** If the SQL statement does not currently point to a valid row, or if the +** column index is out of range, the result is undefined. +** These routines may only be called when the most recent call to +** [sqlite3_step()] has returned [SQLITE_ROW] and neither +** [sqlite3_reset()] nor [sqlite3_finalize()] has been called subsequently. +** If any of these routines are called after [sqlite3_reset()] or +** [sqlite3_finalize()] or after [sqlite3_step()] has returned +** something other than [SQLITE_ROW], the results are undefined. +** If [sqlite3_step()] or [sqlite3_reset()] or [sqlite3_finalize()] +** are called from a different thread while any of these routines +** are pending, then the results are undefined. +** +** The sqlite3_column_type() routine returns the +** [SQLITE_INTEGER | datatype code] for the initial data type +** of the result column. The returned value is one of [SQLITE_INTEGER], +** [SQLITE_FLOAT], [SQLITE_TEXT], [SQLITE_BLOB], or [SQLITE_NULL]. The value +** returned by sqlite3_column_type() is only meaningful if no type +** conversions have occurred as described below. After a type conversion, +** the value returned by sqlite3_column_type() is undefined. Future +** versions of SQLite may change the behavior of sqlite3_column_type() +** following a type conversion. +** +** If the result is a BLOB or UTF-8 string then the sqlite3_column_bytes() +** routine returns the number of bytes in that BLOB or string. +** If the result is a UTF-16 string, then sqlite3_column_bytes() converts +** the string to UTF-8 and then returns the number of bytes. +** If the result is a numeric value then sqlite3_column_bytes() uses +** [sqlite3_snprintf()] to convert that value to a UTF-8 string and returns +** the number of bytes in that string. +** The value returned does not include the zero terminator at the end +** of the string. For clarity: the value returned is the number of +** bytes in the string, not the number of characters. +** +** Strings returned by sqlite3_column_text() and sqlite3_column_text16(), +** even zero-length strings, are always zero terminated. The return +** value from sqlite3_column_blob() for a zero-length blob is an arbitrary +** pointer, possibly even a NULL pointer. +** +** The sqlite3_column_bytes16() routine is similar to sqlite3_column_bytes() +** but leaves the result in UTF-16 instead of UTF-8. +** The zero terminator is not included in this count. +** +** These routines attempt to convert the value where appropriate.
For +** example, if the internal representation is FLOAT and a text result +** is requested, [sqlite3_snprintf()] is used internally to do the conversion +** automatically. The following table details the conversions that +** are applied: +** +**
+**
+**    Internal Type    Requested Type    Conversion
+**    -------------    --------------    --------------------------------
+**    NULL             INTEGER           Result is 0
+**    NULL             FLOAT             Result is 0.0
+**    NULL             TEXT              Result is NULL pointer
+**    NULL             BLOB              Result is NULL pointer
+**    INTEGER          FLOAT             Convert from integer to float
+**    INTEGER          TEXT              ASCII rendering of the integer
+**    INTEGER          BLOB              Same as for INTEGER->TEXT
+**    FLOAT            INTEGER           Convert from float to integer
+**    FLOAT            TEXT              ASCII rendering of the float
+**    FLOAT            BLOB              Same as FLOAT->TEXT
+**    TEXT             INTEGER           Use atoi()
+**    TEXT             FLOAT             Use atof()
+**    TEXT             BLOB              No change
+**    BLOB             INTEGER           Convert to TEXT then use atoi()
+**    BLOB             FLOAT             Convert to TEXT then use atof()
+**    BLOB             TEXT              Add a zero terminator if needed
+**
+** +** The table above makes reference to standard C library functions atoi() +** and atof(). SQLite does not really use these functions. It has its +** own equivalent internal routines. The atoi() and atof() names are +** used in the table for brevity and because they are familiar to most +** C programmers. +** +** Note that when type conversions occur, pointers returned by prior +** calls to sqlite3_column_blob(), sqlite3_column_text(), and/or +** sqlite3_column_text16() may be invalidated. +** Type conversions and pointer invalidations might occur +** in the following cases: +**
+**
+**   •  The initial content is a BLOB and sqlite3_column_text()
+**      or sqlite3_column_text16() is called.  A zero-terminator might
+**      need to be added to the string.
+**
+**   •  The initial content is UTF-8 text and sqlite3_column_bytes16() or
+**      sqlite3_column_text16() is called.  The content must be converted
+**      to UTF-16.
+**
+**   •  The initial content is UTF-16 text and sqlite3_column_bytes() or
+**      sqlite3_column_text() is called.  The content must be converted
+**      to UTF-8.
+**
+** +** Conversions between UTF-16be and UTF-16le are always done in place and do +** not invalidate a prior pointer, though of course the content of the buffer +** that the prior pointer points to will have been modified. Other kinds +** of conversion are done in place when it is possible, but sometimes it is +** not possible and in those cases prior pointers are invalidated. +** +** The safest and easiest to remember policy is to invoke these routines +** in one of the following ways: +**
+**
+**   •  sqlite3_column_text() followed by sqlite3_column_bytes()
+**
+**   •  sqlite3_column_blob() followed by sqlite3_column_bytes()
+**
+**   •  sqlite3_column_text16() followed by sqlite3_column_bytes16()
+**
+** +** In other words, you should call sqlite3_column_text(), sqlite3_column_blob(), +** or sqlite3_column_text16() first to force the result into the desired +** format, then invoke sqlite3_column_bytes() or sqlite3_column_bytes16() to +** find the size of the result. Do not mix calls to sqlite3_column_text() or +** sqlite3_column_blob() with calls to sqlite3_column_bytes16(). And do not +** mix calls to sqlite3_column_text16() with calls to sqlite3_column_bytes(). +** +** The pointers returned are valid until a type conversion occurs as +** described above, or until [sqlite3_step()] or [sqlite3_reset()] or +** [sqlite3_finalize()] is called. The memory space used to hold strings +** and blobs is freed automatically. Do not pass the pointers returned by +** [sqlite3_column_blob()], [sqlite3_column_text()], etc. into +** [sqlite3_free()]. +** +** If a memory allocation error occurs during the evaluation of any +** of these routines, a default value is returned. The default value +** is either the integer 0, the floating point number 0.0, or a NULL +** pointer. Subsequent calls to [sqlite3_errcode()] will return +** [SQLITE_NOMEM]. +*/ +const void *sqlite3_column_blob(sqlite3_stmt*, int iCol); +int sqlite3_column_bytes(sqlite3_stmt*, int iCol); +int sqlite3_column_bytes16(sqlite3_stmt*, int iCol); +double sqlite3_column_double(sqlite3_stmt*, int iCol); +int sqlite3_column_int(sqlite3_stmt*, int iCol); +sqlite3_int64 sqlite3_column_int64(sqlite3_stmt*, int iCol); +const unsigned char *sqlite3_column_text(sqlite3_stmt*, int iCol); +const void *sqlite3_column_text16(sqlite3_stmt*, int iCol); +int sqlite3_column_type(sqlite3_stmt*, int iCol); +sqlite3_value *sqlite3_column_value(sqlite3_stmt*, int iCol); + +/* +** CAPI3REF: Destroy A Prepared Statement Object +** +** The sqlite3_finalize() function is called to delete a +** [sqlite3_stmt | compiled SQL statement]. If the statement was +** executed successfully, or not executed at all, then SQLITE_OK is returned. +** If execution of the statement failed then an +** [SQLITE_ERROR | error code] or [SQLITE_IOERR_READ | extended error code] +** is returned. +** +** This routine can be called at any point during the execution of the +** [sqlite3_stmt | virtual machine]. If the virtual machine has not +** completed execution when this routine is called, that is like +** encountering an error or an interrupt. (See [sqlite3_interrupt()].) +** Incomplete updates may be rolled back and transactions cancelled, +** depending on the circumstances, and the +** [SQLITE_ERROR | result code] returned will be [SQLITE_ABORT]. +*/ +int sqlite3_finalize(sqlite3_stmt *pStmt); + +/* +** CAPI3REF: Reset A Prepared Statement Object +** +** The sqlite3_reset() function is called to reset a +** [sqlite3_stmt | compiled SQL statement] object +** back to its initial state, ready to be re-executed. +** Any SQL statement variables that had values bound to them using +** the [sqlite3_bind_blob | sqlite3_bind_*() API] retain their values. +** Use [sqlite3_clear_bindings()] to reset the bindings. +*/ +int sqlite3_reset(sqlite3_stmt *pStmt); + +/* +** CAPI3REF: Create Or Redefine SQL Functions +** +** The following two functions are used to add SQL functions or aggregates +** or to redefine the behavior of existing SQL functions or aggregates. The +** only difference between the two is that the second parameter, the +** name of the (scalar) function or aggregate, is encoded in UTF-8 for +** sqlite3_create_function() and UTF-16 for sqlite3_create_function16().
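+**
+** Before the parameter-by-parameter description that follows, here is a
+** minimal illustrative sketch (the SQL function name "half" and the C
+** routine halfFunc are hypothetical, not part of this header) of
+** registering a scalar function:
+**
+**  static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
+**    sqlite3_result_double(ctx, 0.5*sqlite3_value_double(argv[0]));
+**  }
+**  ...
+**  sqlite3_create_function(db, "half", 1, SQLITE_UTF8, 0, halfFunc, 0, 0);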
+** +** The first argument is the [sqlite3 | database handle] that holds the +** SQL function or aggregate is to be added or redefined. If a single +** program uses more than one database handle internally, then SQL +** functions or aggregates must be added individually to each database +** handle with which they will be used. +** +** The second parameter is the name of the SQL function to be created +** or redefined. +** The length of the name is limited to 255 bytes, exclusive of the +** zero-terminator. Note that the name length limit is in bytes, not +** characters. Any attempt to create a function with a longer name +** will result in an SQLITE_ERROR error. +** +** The third parameter is the number of arguments that the SQL function or +** aggregate takes. If this parameter is negative, then the SQL function or +** aggregate may take any number of arguments. +** +** The fourth parameter, eTextRep, specifies what +** [SQLITE_UTF8 | text encoding] this SQL function prefers for +** its parameters. Any SQL function implementation should be able to work +** work with UTF-8, UTF-16le, or UTF-16be. But some implementations may be +** more efficient with one encoding than another. It is allowed to +** invoke sqlite3_create_function() or sqlite3_create_function16() multiple +** times with the same function but with different values of eTextRep. +** When multiple implementations of the same function are available, SQLite +** will pick the one that involves the least amount of data conversion. +** If there is only a single implementation which does not care what +** text encoding is used, then the fourth argument should be +** [SQLITE_ANY]. +** +** The fifth parameter is an arbitrary pointer. The implementation +** of the function can gain access to this pointer using +** [sqlite3_user_data()]. +** +** The seventh, eighth and ninth parameters, xFunc, xStep and xFinal, are +** pointers to C-language functions that implement the SQL +** function or aggregate. A scalar SQL function requires an implementation of +** the xFunc callback only, NULL pointers should be passed as the xStep +** and xFinal parameters. An aggregate SQL function requires an implementation +** of xStep and xFinal and NULL should be passed for xFunc. To delete an +** existing SQL function or aggregate, pass NULL for all three function +** callback. +** +** It is permitted to register multiple implementations of the same +** functions with the same name but with either differing numbers of +** arguments or differing perferred text encodings. SQLite will use +** the implementation most closely matches the way in which the +** SQL function is used. +*/ +int sqlite3_create_function( + sqlite3 *, + const char *zFunctionName, + int nArg, + int eTextRep, + void*, + void (*xFunc)(sqlite3_context*,int,sqlite3_value**), + void (*xStep)(sqlite3_context*,int,sqlite3_value**), + void (*xFinal)(sqlite3_context*) +); +int sqlite3_create_function16( + sqlite3*, + const void *zFunctionName, + int nArg, + int eTextRep, + void*, + void (*xFunc)(sqlite3_context*,int,sqlite3_value**), + void (*xStep)(sqlite3_context*,int,sqlite3_value**), + void (*xFinal)(sqlite3_context*) +); + +/* +** CAPI3REF: Text Encodings +** +** These constant define integer codes that represent the various +** text encodings supported by SQLite. 
+*/ +#define SQLITE_UTF8 1 +#define SQLITE_UTF16LE 2 +#define SQLITE_UTF16BE 3 +#define SQLITE_UTF16 4 /* Use native byte order */ +#define SQLITE_ANY 5 /* sqlite3_create_function only */ +#define SQLITE_UTF16_ALIGNED 8 /* sqlite3_create_collation only */ + +/* +** CAPI3REF: Obsolete Functions +** +** These functions are all now obsolete. In order to maintain +** backwards compatibility with older code, we continue to support +** these functions. However, new development projects should avoid +** the use of these functions. To help encourage people to avoid +** using these functions, we are not going to tell you want they do. +*/ +int sqlite3_aggregate_count(sqlite3_context*); +int sqlite3_expired(sqlite3_stmt*); +int sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*); +int sqlite3_global_recover(void); +void sqlite3_thread_cleanup(void); + +/* +** CAPI3REF: Obtaining SQL Function Parameter Values +** +** The C-language implementation of SQL functions and aggregates uses +** this set of interface routines to access the parameter values on +** the function or aggregate. +** +** The xFunc (for scalar functions) or xStep (for aggregates) parameters +** to [sqlite3_create_function()] and [sqlite3_create_function16()] +** define callbacks that implement the SQL functions and aggregates. +** The 4th parameter to these callbacks is an array of pointers to +** [sqlite3_value] objects. There is one [sqlite3_value] object for +** each parameter to the SQL function. These routines are used to +** extract values from the [sqlite3_value] objects. +** +** These routines work just like the corresponding +** [sqlite3_column_blob | sqlite3_column_* routines] except that +** these routines take a single [sqlite3_value*] pointer instead +** of an [sqlite3_stmt*] pointer and an integer column number. +** +** The sqlite3_value_text16() interface extracts a UTF16 string +** in the native byte-order of the host machine. The +** sqlite3_value_text16be() and sqlite3_value_text16le() interfaces +** extract UTF16 strings as big-endian and little-endian respectively. +** +** The sqlite3_value_numeric_type() interface attempts to apply +** numeric affinity to the value. This means that an attempt is +** made to convert the value to an integer or floating point. If +** such a conversion is possible without loss of information (in order +** words if the value is original a string that looks like a number) +** then it is done. Otherwise no conversion occurs. The +** [SQLITE_INTEGER | datatype] after conversion is returned. +** +** Please pay particular attention to the fact that the pointer that +** is returned from [sqlite3_value_blob()], [sqlite3_value_text()], or +** [sqlite3_value_text16()] can be invalidated by a subsequent call to +** [sqlite3_value_bytes()], [sqlite3_value_bytes16()], [sqlite3_value_text()], +** or [sqlite3_value_text16()]. +** +** These routines must be called from the same thread as +** the SQL function that supplied the sqlite3_value* parameters. +** Or, if the sqlite3_value* argument comes from the [sqlite3_column_value()] +** interface, then these routines should be called from the same thread +** that ran [sqlite3_column_value()]. 
+*/ +const void *sqlite3_value_blob(sqlite3_value*); +int sqlite3_value_bytes(sqlite3_value*); +int sqlite3_value_bytes16(sqlite3_value*); +double sqlite3_value_double(sqlite3_value*); +int sqlite3_value_int(sqlite3_value*); +sqlite3_int64 sqlite3_value_int64(sqlite3_value*); +const unsigned char *sqlite3_value_text(sqlite3_value*); +const void *sqlite3_value_text16(sqlite3_value*); +const void *sqlite3_value_text16le(sqlite3_value*); +const void *sqlite3_value_text16be(sqlite3_value*); +int sqlite3_value_type(sqlite3_value*); +int sqlite3_value_numeric_type(sqlite3_value*); + +/* +** CAPI3REF: Obtain Aggregate Function Context +** +** The implementation of aggregate SQL functions use this routine to allocate +** a structure for storing their state. The first time this routine +** is called for a particular aggregate, a new structure of size nBytes +** is allocated, zeroed, and returned. On subsequent calls (for the +** same aggregate instance) the same buffer is returned. The implementation +** of the aggregate can use the returned buffer to accumulate data. +** +** The buffer allocated is freed automatically by SQLite whan the aggregate +** query concludes. +** +** The first parameter should be a copy of the +** [sqlite3_context | SQL function context] that is the first +** parameter to the callback routine that implements the aggregate +** function. +** +** This routine must be called from the same thread in which +** the aggregate SQL function is running. +*/ +void *sqlite3_aggregate_context(sqlite3_context*, int nBytes); + +/* +** CAPI3REF: User Data For Functions +** +** The pUserData parameter to the [sqlite3_create_function()] +** and [sqlite3_create_function16()] routines +** used to register user functions is available to +** the implementation of the function using this call. +** +** This routine must be called from the same thread in which +** the SQL function is running. +*/ +void *sqlite3_user_data(sqlite3_context*); + +/* +** CAPI3REF: Function Auxiliary Data +** +** The following two functions may be used by scalar SQL functions to +** associate meta-data with argument values. If the same value is passed to +** multiple invocations of the same SQL function during query execution, under +** some circumstances the associated meta-data may be preserved. This may +** be used, for example, to add a regular-expression matching scalar +** function. The compiled version of the regular expression is stored as +** meta-data associated with the SQL value passed as the regular expression +** pattern. The compiled regular expression can be reused on multiple +** invocations of the same function so that the original pattern string +** does not need to be recompiled on each invocation. +** +** The sqlite3_get_auxdata() interface returns a pointer to the meta-data +** associated with the Nth argument value to the current SQL function +** call, where N is the second parameter. If no meta-data has been set for +** that value, then a NULL pointer is returned. +** +** The sqlite3_set_auxdata() is used to associate meta-data with an SQL +** function argument. The third parameter is a pointer to the meta-data +** to be associated with the Nth user function argument value. The fourth +** parameter specifies a destructor that will be called on the meta- +** data pointer to release it when it is no longer required. If the +** destructor is NULL, it is not invoked. +** +** In practice, meta-data is preserved between function calls for +** expressions that are constant at compile time. 
This includes literal +** values and SQL variables. +** +** These routines must be called from the same thread in which +** the SQL function is running. +*/ +void *sqlite3_get_auxdata(sqlite3_context*, int); +void sqlite3_set_auxdata(sqlite3_context*, int, void*, void (*)(void*)); + + +/* +** CAPI3REF: Constants Defining Special Destructor Behavior +** +** These are special value for the destructor that is passed in as the +** final argument to routines like [sqlite3_result_blob()]. If the destructor +** argument is SQLITE_STATIC, it means that the content pointer is constant +** and will never change. It does not need to be destroyed. The +** SQLITE_TRANSIENT value means that the content will likely change in +** the near future and that SQLite should make its own private copy of +** the content before returning. +** +** The typedef is necessary to work around problems in certain +** C++ compilers. See ticket #2191. +*/ +typedef void (*sqlite3_destructor_type)(void*); +#define SQLITE_STATIC ((sqlite3_destructor_type)0) +#define SQLITE_TRANSIENT ((sqlite3_destructor_type)-1) + +/* +** CAPI3REF: Setting The Result Of An SQL Function +** +** These routines are used by the xFunc or xFinal callbacks that +** implement SQL functions and aggregates. See +** [sqlite3_create_function()] and [sqlite3_create_function16()] +** for additional information. +** +** These functions work very much like the +** [sqlite3_bind_blob | sqlite3_bind_*] family of functions used +** to bind values to host parameters in prepared statements. +** Refer to the +** [sqlite3_bind_blob | sqlite3_bind_* documentation] for +** additional information. +** +** The sqlite3_result_error() and sqlite3_result_error16() functions +** cause the implemented SQL function to throw an exception. The +** parameter to sqlite3_result_error() or sqlite3_result_error16() +** is the text of an error message. +** +** The sqlite3_result_toobig() cause the function implementation +** to throw and error indicating that a string or BLOB is to long +** to represent. +** +** These routines must be called from within the same thread as +** the SQL function associated with the [sqlite3_context] pointer. +*/ +void sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*)); +void sqlite3_result_double(sqlite3_context*, double); +void sqlite3_result_error(sqlite3_context*, const char*, int); +void sqlite3_result_error16(sqlite3_context*, const void*, int); +void sqlite3_result_error_toobig(sqlite3_context*); +void sqlite3_result_error_nomem(sqlite3_context*); +void sqlite3_result_int(sqlite3_context*, int); +void sqlite3_result_int64(sqlite3_context*, sqlite3_int64); +void sqlite3_result_null(sqlite3_context*); +void sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*)); +void sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*)); +void sqlite3_result_text16le(sqlite3_context*, const void*, int,void(*)(void*)); +void sqlite3_result_text16be(sqlite3_context*, const void*, int,void(*)(void*)); +void sqlite3_result_value(sqlite3_context*, sqlite3_value*); +void sqlite3_result_zeroblob(sqlite3_context*, int n); + +/* +** CAPI3REF: Define New Collating Sequences +** +** These functions are used to add new collation sequences to the +** [sqlite3*] handle specified as the first argument. +** +** The name of the new collation sequence is specified as a UTF-8 string +** for sqlite3_create_collation() and sqlite3_create_collation_v2() +** and a UTF-16 string for sqlite3_create_collation16(). 
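As a hedged sketch of the registration interface documented above, the following registers a toy one-argument scalar function; the name "half" and its behaviour are purely illustrative and not part of SQLite.

```c
#include "sqlite3.h"

/* half(X): return X/2 as a REAL, propagating NULL. */
static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc;  /* always 1, as requested at registration time */
  if( sqlite3_value_type(argv[0])==SQLITE_NULL ){
    sqlite3_result_null(ctx);
  }else{
    sqlite3_result_double(ctx, sqlite3_value_double(argv[0])/2.0);
  }
}

/* Scalar function: xFunc only, NULL for xStep and xFinal.
** SQLITE_ANY because this implementation has no encoding preference. */
static int register_half(sqlite3 *db){
  return sqlite3_create_function(db, "half", 1, SQLITE_ANY, 0,
                                 halfFunc, 0, 0);
}
```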
In all cases +** the name is passed as the second function argument. +** +** The third argument must be one of the constants [SQLITE_UTF8], +** [SQLITE_UTF16LE] or [SQLITE_UTF16BE], indicating that the user-supplied +** routine expects to be passed pointers to strings encoded using UTF-8, +** UTF-16 little-endian or UTF-16 big-endian respectively. +** +** A pointer to the user supplied routine must be passed as the fifth +** argument. If it is NULL, this is the same as deleting the collation +** sequence (so that SQLite cannot call it anymore). Each time the user +** supplied function is invoked, it is passed a copy of the void* passed as +** the fourth argument to sqlite3_create_collation() or +** sqlite3_create_collation16() as its first parameter. +** +** The remaining arguments to the user-supplied routine are two strings, +** each represented by a [length, data] pair and encoded in the encoding +** that was passed as the third argument when the collation sequence was +** registered. The user routine should return negative, zero or positive if +** the first string is less than, equal to, or greater than the second +** string. i.e. (STRING1 - STRING2). +** +** The sqlite3_create_collation_v2() works like sqlite3_create_collation() +** excapt that it takes an extra argument which is a destructor for +** the collation. The destructor is called when the collation is +** destroyed and is passed a copy of the fourth parameter void* pointer +** of the sqlite3_create_collation_v2(). Collations are destroyed when +** they are overridden by later calls to the collation creation functions +** or when the [sqlite3*] database handle is closed using [sqlite3_close()]. +** +** The sqlite3_create_collation_v2() interface is experimental and +** subject to change in future releases. The other collation creation +** functions are stable. +*/ +int sqlite3_create_collation( + sqlite3*, + const char *zName, + int eTextRep, + void*, + int(*xCompare)(void*,int,const void*,int,const void*) +); +int sqlite3_create_collation_v2( + sqlite3*, + const char *zName, + int eTextRep, + void*, + int(*xCompare)(void*,int,const void*,int,const void*), + void(*xDestroy)(void*) +); +int sqlite3_create_collation16( + sqlite3*, + const char *zName, + int eTextRep, + void*, + int(*xCompare)(void*,int,const void*,int,const void*) +); + +/* +** CAPI3REF: Collation Needed Callbacks +** +** To avoid having to register all collation sequences before a database +** can be used, a single callback function may be registered with the +** database handle to be called whenever an undefined collation sequence is +** required. +** +** If the function is registered using the sqlite3_collation_needed() API, +** then it is passed the names of undefined collation sequences as strings +** encoded in UTF-8. If sqlite3_collation_needed16() is used, the names +** are passed as UTF-16 in machine native byte order. A call to either +** function replaces any existing callback. +** +** When the callback is invoked, the first argument passed is a copy +** of the second argument to sqlite3_collation_needed() or +** sqlite3_collation_needed16(). The second argument is the database +** handle. The third argument is one of [SQLITE_UTF8], [SQLITE_UTF16BE], or +** [SQLITE_UTF16LE], indicating the most desirable form of the collation +** sequence function required. The fourth parameter is the name of the +** required collation sequence. 
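Ordinary application code, or the collation-needed callback described next, registers a collation with the routines declared above. The sketch below is illustrative only: an ASCII-only case-folding comparison registered under a hypothetical name.

```c
#include <ctype.h>
#include "sqlite3.h"

/* xCompare callback: negative/zero/positive, like memcmp(), applied to
** the two [length, data] pairs supplied by SQLite. */
static int nocaseAsciiCmp(void *pUnused, int n1, const void *p1,
                                         int n2, const void *p2){
  const unsigned char *a = (const unsigned char*)p1;
  const unsigned char *b = (const unsigned char*)p2;
  int i, n = n1<n2 ? n1 : n2;
  (void)pUnused;
  for(i=0; i<n; i++){
    int c = tolower(a[i]) - tolower(b[i]);
    if( c ) return c;
  }
  return n1 - n2;
}

static int register_nocase_ascii(sqlite3 *db){
  /* SQLITE_UTF8: the callback will be handed UTF-8 encoded strings. */
  return sqlite3_create_collation(db, "NOCASE_ASCII", SQLITE_UTF8,
                                  0, nocaseAsciiCmp);
}
```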
+** +** The callback function should register the desired collation using +** [sqlite3_create_collation()], [sqlite3_create_collation16()], or +** [sqlite3_create_collation_v2()]. +*/ +int sqlite3_collation_needed( + sqlite3*, + void*, + void(*)(void*,sqlite3*,int eTextRep,const char*) +); +int sqlite3_collation_needed16( + sqlite3*, + void*, + void(*)(void*,sqlite3*,int eTextRep,const void*) +); + +/* +** Specify the key for an encrypted database. This routine should be +** called right after sqlite3_open(). +** +** The code to implement this API is not available in the public release +** of SQLite. +*/ +int sqlite3_key( + sqlite3 *db, /* Database to be rekeyed */ + const void *pKey, int nKey /* The key */ +); + +/* +** Change the key on an open database. If the current database is not +** encrypted, this routine will encrypt it. If pNew==0 or nNew==0, the +** database is decrypted. +** +** The code to implement this API is not available in the public release +** of SQLite. +*/ +int sqlite3_rekey( + sqlite3 *db, /* Database to be rekeyed */ + const void *pKey, int nKey /* The new key */ +); + +/* +** CAPI3REF: Suspend Execution For A Short Time +** +** This function causes the current thread to suspend execution +** a number of milliseconds specified in its parameter. +** +** If the operating system does not support sleep requests with +** millisecond time resolution, then the time will be rounded up to +** the nearest second. The number of milliseconds of sleep actually +** requested from the operating system is returned. +** +** SQLite implements this interface by calling the xSleep() +** method of the default [sqlite3_vfs] object. +*/ +int sqlite3_sleep(int); + +/* +** CAPI3REF: Name Of The Folder Holding Temporary Files +** +** If this global variable is made to point to a string which is +** the name of a folder (a.ka. directory), then all temporary files +** created by SQLite will be placed in that directory. If this variable +** is NULL pointer, then SQLite does a search for an appropriate temporary +** file directory. +** +** It is not safe to modify this variable once a database connection +** has been opened. It is intended that this variable be set once +** as part of process initialization and before any SQLite interface +** routines have been call and remain unchanged thereafter. +*/ +SQLITE_EXTERN char *sqlite3_temp_directory; + +/* +** CAPI3REF: Test To See If The Database Is In Auto-Commit Mode +** +** Test to see whether or not the database connection is in autocommit +** mode. Return TRUE if it is and FALSE if not. Autocommit mode is on +** by default. Autocommit is disabled by a BEGIN statement and reenabled +** by the next COMMIT or ROLLBACK. +** +** If certain kinds of errors occur on a statement within a multi-statement +** transactions (errors including [SQLITE_FULL], [SQLITE_IOERR], +** [SQLITE_NOMEM], [SQLITE_BUSY], and [SQLITE_INTERRUPT]) then the +** transaction might be rolled back automatically. The only way to +** find out if SQLite automatically rolled back the transaction after +** an error is to use this function. +** +** If another thread changes the autocommit status of the database +** connection while this routine is running, then the return value +** is undefined. +*/ +int sqlite3_get_autocommit(sqlite3*); + +/* +** CAPI3REF: Find The Database Handle Associated With A Prepared Statement +** +** Return the [sqlite3*] database handle to which a +** [sqlite3_stmt | prepared statement] belongs. 
+** This is the same database handle that was +** the first argument to the [sqlite3_prepare_v2()] or its variants +** that was used to create the statement in the first place. +*/ +sqlite3 *sqlite3_db_handle(sqlite3_stmt*); + + +/* +** CAPI3REF: Commit And Rollback Notification Callbacks +** +** These routines +** register callback functions to be invoked whenever a transaction +** is committed or rolled back. The pArg argument is passed through +** to the callback. If the callback on a commit hook function +** returns non-zero, then the commit is converted into a rollback. +** +** If another function was previously registered, its pArg value is returned. +** Otherwise NULL is returned. +** +** Registering a NULL function disables the callback. +** +** For the purposes of this API, a transaction is said to have been +** rolled back if an explicit "ROLLBACK" statement is executed, or +** an error or constraint causes an implicit rollback to occur. The +** callback is not invoked if a transaction is automatically rolled +** back because the database connection is closed. +** +** These are experimental interfaces and are subject to change. +*/ +void *sqlite3_commit_hook(sqlite3*, int(*)(void*), void*); +void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*); + +/* +** CAPI3REF: Data Change Notification Callbacks +** +** Register a callback function with the database connection identified by the +** first argument to be invoked whenever a row is updated, inserted or deleted. +** Any callback set by a previous call to this function for the same +** database connection is overridden. +** +** The second argument is a pointer to the function to invoke when a +** row is updated, inserted or deleted. The first argument to the callback is +** a copy of the third argument to sqlite3_update_hook(). The second callback +** argument is one of SQLITE_INSERT, SQLITE_DELETE or SQLITE_UPDATE, depending +** on the operation that caused the callback to be invoked. The third and +** fourth arguments to the callback contain pointers to the database and +** table name containing the affected row. The final callback parameter is +** the rowid of the row. In the case of an update, this is the rowid after +** the update takes place. +** +** The update hook is not invoked when internal system tables are +** modified (i.e. sqlite_master and sqlite_sequence). +** +** If another function was previously registered, its pArg value is returned. +** Otherwise NULL is returned. +*/ +void *sqlite3_update_hook( + sqlite3*, + void(*)(void *,int ,char const *,char const *,sqlite3_int64), + void* +); + +/* +** CAPI3REF: Enable Or Disable Shared Pager Cache +** +** This routine enables or disables the sharing of the database cache +** and schema data structures between connections to the same database. +** Sharing is enabled if the argument is true and disabled if the argument +** is false. +** +** Beginning in SQLite version 3.5.0, cache sharing is enabled and disabled +** for an entire process. In prior versions of SQLite, sharing was +** enabled or disabled for each thread separately. +** +** The cache sharing mode set by this interface effects all subsequent +** calls to [sqlite3_open()], [sqlite3_open_v2()], and [sqlite3_open16()]. +** Existing database connections continue use the sharing mode that was +** in effect at the time they were opened. +** +** Virtual tables cannot be used with a shared cache. 
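Before the shared-cache discussion continues below, a small illustration of the update hook declared above; the logging destination and message format are arbitrary choices made for the sketch.

```c
#include <stdio.h>
#include "sqlite3.h"

/* Signature matches the callback type taken by sqlite3_update_hook(). */
static void logChange(void *pArg, int op, const char *zDb,
                      const char *zTbl, sqlite3_int64 rowid){
  const char *zOp = op==SQLITE_INSERT ? "INSERT" :
                    op==SQLITE_DELETE ? "DELETE" : "UPDATE";
  fprintf((FILE*)pArg, "%s on %s.%s rowid=%lld\n",
          zOp, zDb, zTbl, (long long)rowid);
}

static void install_update_hook(sqlite3 *db){
  sqlite3_update_hook(db, logChange, (void*)stderr);
}
```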
When shared +** cache is enabled, the [sqlite3_create_module()] API used to register +** virtual tables will always return an error. +** +** This routine returns [SQLITE_OK] if shared cache was +** enabled or disabled successfully. An [SQLITE_ERROR | error code] +** is returned otherwise. +** +** Shared cache is disabled by default. But this might change in +** future releases of SQLite. Applications that care about shared +** cache setting should set it explicitly. +*/ +int sqlite3_enable_shared_cache(int); + +/* +** CAPI3REF: Attempt To Free Heap Memory +** +** Attempt to free N bytes of heap memory by deallocating non-essential +** memory allocations held by the database library (example: memory +** used to cache database pages to improve performance). +*/ +int sqlite3_release_memory(int); + +/* +** CAPI3REF: Impose A Limit On Heap Size +** +** Place a "soft" limit on the amount of heap memory that may be allocated +** by SQLite. If an internal allocation is requested +** that would exceed the specified limit, [sqlite3_release_memory()] is +** invoked one or more times to free up some space before the allocation +** is made. +** +** The limit is called "soft", because if [sqlite3_release_memory()] cannot +** free sufficient memory to prevent the limit from being exceeded, +** the memory is allocated anyway and the current operation proceeds. +** +** A negative or zero value for N means that there is no soft heap limit and +** [sqlite3_release_memory()] will only be called when memory is exhausted. +** The default value for the soft heap limit is zero. +** +** SQLite makes a best effort to honor the soft heap limit. But if it +** is unable to reduce memory usage below the soft limit, execution will +** continue without error or notification. This is why the limit is +** called a "soft" limit. It is advisory only. +** +** The soft heap limit is implemented using the [sqlite3_memory_alarm()] +** interface. Only a single memory alarm is available in the default +** implementation. This means that if the application also uses the +** memory alarm interface it will interfere with the operation of the +** soft heap limit and undefined behavior will result. +** +** Prior to SQLite version 3.5.0, this routine only constrained the memory +** allocated by a single thread - the same thread in which this routine +** runs. Beginning with SQLite version 3.5.0, the soft heap limit is +** applied to all threads. The value specified for the soft heap limit +** is an upper bound on the total memory allocation for all threads. In +** version 3.5.0 there is no mechanism for limiting the heap usage for +** individual threads. +*/ +void sqlite3_soft_heap_limit(int); + +/* +** CAPI3REF: Extract Metadata About A Column Of A Table +** +** This routine +** returns meta-data about a specific column of a specific database +** table accessible using the connection handle passed as the first function +** argument. +** +** The column is identified by the second, third and fourth parameters to +** this function. The second parameter is either the name of the database +** (i.e. "main", "temp" or an attached database) containing the specified +** table or NULL. If it is NULL, then all attached databases are searched +** for the table using the same algorithm as the database engine uses to +** resolve unqualified table references. +** +** The third and fourth parameters to this function are the table and column +** name of the desired column, respectively. Neither of these parameters +** may be NULL. 
+** +** Meta information is returned by writing to the memory locations passed as +** the 5th and subsequent parameters to this function. Any of these +** arguments may be NULL, in which case the corresponding element of meta +** information is ommitted. +** +**
+** Parameter     Output Type      Description
+** -----------------------------------
+**
+**   5th         const char*      Data type
+**   6th         const char*      Name of the default collation sequence 
+**   7th         int              True if the column has a NOT NULL constraint
+**   8th         int              True if the column is part of the PRIMARY KEY
+**   9th         int              True if the column is AUTOINCREMENT
+** 
+** +** +** The memory pointed to by the character pointers returned for the +** declaration type and collation sequence is valid only until the next +** call to any sqlite API function. +** +** If the specified table is actually a view, then an error is returned. +** +** If the specified column is "rowid", "oid" or "_rowid_" and an +** INTEGER PRIMARY KEY column has been explicitly declared, then the output +** parameters are set for the explicitly declared column. If there is no +** explicitly declared IPK column, then the output parameters are set as +** follows: +** +**
+**     data type: "INTEGER"
+**     collation sequence: "BINARY"
+**     not null: 0
+**     primary key: 1
+**     auto increment: 0
+** 
+** +** This function may load one or more schemas from database files. If an +** error occurs during this process, or if the requested table or column +** cannot be found, an SQLITE error code is returned and an error message +** left in the database handle (to be retrieved using sqlite3_errmsg()). +** +** This API is only available if the library was compiled with the +** SQLITE_ENABLE_COLUMN_METADATA preprocessor symbol defined. +*/ +int sqlite3_table_column_metadata( + sqlite3 *db, /* Connection handle */ + const char *zDbName, /* Database name or NULL */ + const char *zTableName, /* Table name */ + const char *zColumnName, /* Column name */ + char const **pzDataType, /* OUTPUT: Declared data type */ + char const **pzCollSeq, /* OUTPUT: Collation sequence name */ + int *pNotNull, /* OUTPUT: True if NOT NULL constraint exists */ + int *pPrimaryKey, /* OUTPUT: True if column part of PK */ + int *pAutoinc /* OUTPUT: True if column is auto-increment */ +); + +/* +** CAPI3REF: Load An Extension +** +** Attempt to load an SQLite extension library contained in the file +** zFile. The entry point is zProc. zProc may be 0 in which case the +** name of the entry point defaults to "sqlite3_extension_init". +** +** Return [SQLITE_OK] on success and [SQLITE_ERROR] if something goes wrong. +** +** If an error occurs and pzErrMsg is not 0, then fill *pzErrMsg with +** error message text. The calling function should free this memory +** by calling [sqlite3_free()]. +** +** Extension loading must be enabled using [sqlite3_enable_load_extension()] +** prior to calling this API or an error will be returned. +*/ +int sqlite3_load_extension( + sqlite3 *db, /* Load the extension into this database connection */ + const char *zFile, /* Name of the shared library containing extension */ + const char *zProc, /* Entry point. Derived from zFile if 0 */ + char **pzErrMsg /* Put error message here if not 0 */ +); + +/* +** CAPI3REF: Enable Or Disable Extension Loading +** +** So as not to open security holes in older applications that are +** unprepared to deal with extension loading, and as a means of disabling +** extension loading while evaluating user-entered SQL, the following +** API is provided to turn the [sqlite3_load_extension()] mechanism on and +** off. It is off by default. See ticket #1863. +** +** Call this routine with onoff==1 to turn extension loading on +** and call it with onoff==0 to turn it back off again. +*/ +int sqlite3_enable_load_extension(sqlite3 *db, int onoff); + +/* +** CAPI3REF: Make Arrangements To Automatically Load An Extension +** +** Register an extension entry point that is automatically invoked +** whenever a new database connection is opened using +** [sqlite3_open()], [sqlite3_open16()], or [sqlite3_open_v2()]. +** +** This API can be invoked at program startup in order to register +** one or more statically linked extensions that will be available +** to all new database connections. +** +** Duplicate extensions are detected so calling this routine multiple +** times with the same extension is harmless. +** +** This routine stores a pointer to the extension in an array +** that is obtained from malloc(). If you run a memory leak +** checker on your program and it reports a leak because of this +** array, then invoke [sqlite3_automatic_extension_reset()] prior +** to shutdown to free the memory. +** +** Automatic extensions apply across all threads. +** +** This interface is experimental and is subject to change or +** removal in future releases of SQLite. 
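Returning briefly to sqlite3_table_column_metadata() declared above, a hedged usage sketch follows. The database, table, and column names are hypothetical, and the call is only available when the library is built with SQLITE_ENABLE_COLUMN_METADATA.

```c
#include <stdio.h>
#include "sqlite3.h"

/* Ask for the metadata of column "name" in table "person" of the
** "main" database.  Output pointers that are not needed may be NULL. */
static int show_column_info(sqlite3 *db){
  const char *zType = 0, *zColl = 0;
  int notNull = 0, primaryKey = 0, autoinc = 0;
  int rc = sqlite3_table_column_metadata(db, "main", "person", "name",
                                         &zType, &zColl,
                                         &notNull, &primaryKey, &autoinc);
  if( rc==SQLITE_OK ){
    printf("type=%s collation=%s notnull=%d pk=%d autoinc=%d\n",
           zType, zColl, notNull, primaryKey, autoinc);
  }
  return rc;  /* on error, sqlite3_errmsg(db) holds the message */
}
```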
+*/ +int sqlite3_auto_extension(void *xEntryPoint); + + +/* +** CAPI3REF: Reset Automatic Extension Loading +** +** Disable all previously registered automatic extensions. This +** routine undoes the effect of all prior [sqlite3_automatic_extension()] +** calls. +** +** This call disabled automatic extensions in all threads. +** +** This interface is experimental and is subject to change or +** removal in future releases of SQLite. +*/ +void sqlite3_reset_auto_extension(void); + + +/* +****** EXPERIMENTAL - subject to change without notice ************** +** +** The interface to the virtual-table mechanism is currently considered +** to be experimental. The interface might change in incompatible ways. +** If this is a problem for you, do not use the interface at this time. +** +** When the virtual-table mechanism stablizes, we will declare the +** interface fixed, support it indefinitely, and remove this comment. +*/ + +/* +** Structures used by the virtual table interface +*/ +typedef struct sqlite3_vtab sqlite3_vtab; +typedef struct sqlite3_index_info sqlite3_index_info; +typedef struct sqlite3_vtab_cursor sqlite3_vtab_cursor; +typedef struct sqlite3_module sqlite3_module; + +/* +** A module is a class of virtual tables. Each module is defined +** by an instance of the following structure. This structure consists +** mostly of methods for the module. +*/ +struct sqlite3_module { + int iVersion; + int (*xCreate)(sqlite3*, void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVTab, char**); + int (*xConnect)(sqlite3*, void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVTab, char**); + int (*xBestIndex)(sqlite3_vtab *pVTab, sqlite3_index_info*); + int (*xDisconnect)(sqlite3_vtab *pVTab); + int (*xDestroy)(sqlite3_vtab *pVTab); + int (*xOpen)(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor); + int (*xClose)(sqlite3_vtab_cursor*); + int (*xFilter)(sqlite3_vtab_cursor*, int idxNum, const char *idxStr, + int argc, sqlite3_value **argv); + int (*xNext)(sqlite3_vtab_cursor*); + int (*xEof)(sqlite3_vtab_cursor*); + int (*xColumn)(sqlite3_vtab_cursor*, sqlite3_context*, int); + int (*xRowid)(sqlite3_vtab_cursor*, sqlite3_int64 *pRowid); + int (*xUpdate)(sqlite3_vtab *, int, sqlite3_value **, sqlite3_int64 *); + int (*xBegin)(sqlite3_vtab *pVTab); + int (*xSync)(sqlite3_vtab *pVTab); + int (*xCommit)(sqlite3_vtab *pVTab); + int (*xRollback)(sqlite3_vtab *pVTab); + int (*xFindFunction)(sqlite3_vtab *pVtab, int nArg, const char *zName, + void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), + void **ppArg); + + int (*xRename)(sqlite3_vtab *pVtab, const char *zNew); +}; + +/* +** The sqlite3_index_info structure and its substructures is used to +** pass information into and receive the reply from the xBestIndex +** method of an sqlite3_module. The fields under **Inputs** are the +** inputs to xBestIndex and are read-only. xBestIndex inserts its +** results into the **Outputs** fields. +** +** The aConstraint[] array records WHERE clause constraints of the +** form: +** +** column OP expr +** +** Where OP is =, <, <=, >, or >=. The particular operator is stored +** in aConstraint[].op. The index of the column is stored in +** aConstraint[].iColumn. aConstraint[].usable is TRUE if the +** expr on the right-hand side can be evaluated (and thus the constraint +** is usable) and false if it cannot. 
+** +** The optimizer automatically inverts terms of the form "expr OP column" +** and makes other simplifications to the WHERE clause in an attempt to +** get as many WHERE clause terms into the form shown above as possible. +** The aConstraint[] array only reports WHERE clause terms in the correct +** form that refer to the particular virtual table being queried. +** +** Information about the ORDER BY clause is stored in aOrderBy[]. +** Each term of aOrderBy records a column of the ORDER BY clause. +** +** The xBestIndex method must fill aConstraintUsage[] with information +** about what parameters to pass to xFilter. If argvIndex>0 then +** the right-hand side of the corresponding aConstraint[] is evaluated +** and becomes the argvIndex-th entry in argv. If aConstraintUsage[].omit +** is true, then the constraint is assumed to be fully handled by the +** virtual table and is not checked again by SQLite. +** +** The idxNum and idxPtr values are recorded and passed into xFilter. +** sqlite3_free() is used to free idxPtr if needToFreeIdxPtr is true. +** +** The orderByConsumed means that output from xFilter will occur in +** the correct order to satisfy the ORDER BY clause so that no separate +** sorting step is required. +** +** The estimatedCost value is an estimate of the cost of doing the +** particular lookup. A full scan of a table with N entries should have +** a cost of N. A binary search of a table of N entries should have a +** cost of approximately log(N). +*/ +struct sqlite3_index_info { + /* Inputs */ + int nConstraint; /* Number of entries in aConstraint */ + struct sqlite3_index_constraint { + int iColumn; /* Column on left-hand side of constraint */ + unsigned char op; /* Constraint operator */ + unsigned char usable; /* True if this constraint is usable */ + int iTermOffset; /* Used internally - xBestIndex should ignore */ + } *aConstraint; /* Table of WHERE clause constraints */ + int nOrderBy; /* Number of terms in the ORDER BY clause */ + struct sqlite3_index_orderby { + int iColumn; /* Column number */ + unsigned char desc; /* True for DESC. False for ASC. */ + } *aOrderBy; /* The ORDER BY clause */ + + /* Outputs */ + struct sqlite3_index_constraint_usage { + int argvIndex; /* if >0, constraint is part of argv to xFilter */ + unsigned char omit; /* Do not code a test for this constraint */ + } *aConstraintUsage; + int idxNum; /* Number used to identify the index */ + char *idxStr; /* String, possibly obtained from sqlite3_malloc */ + int needToFreeIdxStr; /* Free idxStr using sqlite3_free() if true */ + int orderByConsumed; /* True if output is already ordered */ + double estimatedCost; /* Estimated cost of using this index */ +}; +#define SQLITE_INDEX_CONSTRAINT_EQ 2 +#define SQLITE_INDEX_CONSTRAINT_GT 4 +#define SQLITE_INDEX_CONSTRAINT_LE 8 +#define SQLITE_INDEX_CONSTRAINT_LT 16 +#define SQLITE_INDEX_CONSTRAINT_GE 32 +#define SQLITE_INDEX_CONSTRAINT_MATCH 64 + +/* +** This routine is used to register a new module name with an SQLite +** connection. Module names must be registered before creating new +** virtual tables on the module, or before using preexisting virtual +** tables of the module. 
+*/ +int sqlite3_create_module( + sqlite3 *db, /* SQLite connection to register module with */ + const char *zName, /* Name of the module */ + const sqlite3_module *, /* Methods for the module */ + void * /* Client data for xCreate/xConnect */ +); + +/* +** This routine is identical to the sqlite3_create_module() method above, +** except that it allows a destructor function to be specified. It is +** even more experimental than the rest of the virtual tables API. +*/ +int sqlite3_create_module_v2( + sqlite3 *db, /* SQLite connection to register module with */ + const char *zName, /* Name of the module */ + const sqlite3_module *, /* Methods for the module */ + void *, /* Client data for xCreate/xConnect */ + void(*xDestroy)(void*) /* Module destructor function */ +); + +/* +** Every module implementation uses a subclass of the following structure +** to describe a particular instance of the module. Each subclass will +** be tailored to the specific needs of the module implementation. The +** purpose of this superclass is to define certain fields that are common +** to all module implementations. +** +** Virtual tables methods can set an error message by assigning a +** string obtained from sqlite3_mprintf() to zErrMsg. The method should +** take care that any prior string is freed by a call to sqlite3_free() +** prior to assigning a new string to zErrMsg. After the error message +** is delivered up to the client application, the string will be automatically +** freed by sqlite3_free() and the zErrMsg field will be zeroed. Note +** that sqlite3_mprintf() and sqlite3_free() are used on the zErrMsg field +** since virtual tables are commonly implemented in loadable extensions which +** do not have access to sqlite3MPrintf() or sqlite3Free(). +*/ +struct sqlite3_vtab { + const sqlite3_module *pModule; /* The module for this virtual table */ + int nRef; /* Used internally */ + char *zErrMsg; /* Error message from sqlite3_mprintf() */ + /* Virtual table implementations will typically add additional fields */ +}; + +/* Every module implementation uses a subclass of the following structure +** to describe cursors that point into the virtual table and are used +** to loop through the virtual table. Cursors are created using the +** xOpen method of the module. Each module implementation will define +** the content of a cursor structure to suit its own needs. +** +** This superclass exists in order to define fields of the cursor that +** are common to all implementations. +*/ +struct sqlite3_vtab_cursor { + sqlite3_vtab *pVtab; /* Virtual table of this cursor */ + /* Virtual table implementations will typically add additional fields */ +}; + +/* +** The xCreate and xConnect methods of a module use the following API +** to declare the format (the names and datatypes of the columns) of +** the virtual tables they implement. +*/ +int sqlite3_declare_vtab(sqlite3*, const char *zCreateTable); + +/* +** Virtual tables can provide alternative implementations of functions +** using the xFindFunction method. But global versions of those functions +** must exist in order to be overloaded. +** +** This API makes sure a global version of a function with a particular +** name and number of parameters exists. If no such function exists +** before this API is called, a new function is created. The implementation +** of the new function always causes an exception to be thrown. So +** the new function is not good for anything by itself. 
Its only +** purpose is to be a place-holder function that can be overloaded +** by virtual tables. +** +** This API should be considered part of the virtual table interface, +** which is experimental and subject to change. +*/ +int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg); + +/* +** The interface to the virtual-table mechanism defined above (back up +** to a comment remarkably similar to this one) is currently considered +** to be experimental. The interface might change in incompatible ways. +** If this is a problem for you, do not use the interface at this time. +** +** When the virtual-table mechanism stabilizes, we will declare the +** interface fixed, support it indefinitely, and remove this comment. +** +****** EXPERIMENTAL - subject to change without notice ************** +*/ + +/* +** CAPI3REF: A Handle To An Open BLOB +** +** An instance of the following opaque structure is used to +** represent an blob-handle. A blob-handle is created by +** [sqlite3_blob_open()] and destroyed by [sqlite3_blob_close()]. +** The [sqlite3_blob_read()] and [sqlite3_blob_write()] interfaces +** can be used to read or write small subsections of the blob. +** The [sqlite3_blob_bytes()] interface returns the size of the +** blob in bytes. +*/ +typedef struct sqlite3_blob sqlite3_blob; + +/* +** CAPI3REF: Open A BLOB For Incremental I/O +** +** Open a handle to the blob located in row iRow,, column zColumn, +** table zTable in database zDb. i.e. the same blob that would +** be selected by: +** +**
+**     SELECT zColumn FROM zDb.zTable WHERE rowid = iRow;
+** 
+** +** If the flags parameter is non-zero, the blob is opened for +** read and write access. If it is zero, the blob is opened for read +** access. +** +** On success, [SQLITE_OK] is returned and the new +** [sqlite3_blob | blob handle] is written to *ppBlob. +** Otherwise an error code is returned and +** any value written to *ppBlob should not be used by the caller. +** This function sets the database-handle error code and message +** accessible via [sqlite3_errcode()] and [sqlite3_errmsg()]. +*/ +int sqlite3_blob_open( + sqlite3*, + const char *zDb, + const char *zTable, + const char *zColumn, + sqlite3_int64 iRow, + int flags, + sqlite3_blob **ppBlob +); + +/* +** CAPI3REF: Close A BLOB Handle +** +** Close an open [sqlite3_blob | blob handle]. +*/ +int sqlite3_blob_close(sqlite3_blob *); + +/* +** CAPI3REF: Return The Size Of An Open BLOB +** +** Return the size in bytes of the blob accessible via the open +** [sqlite3_blob | blob-handle] passed as an argument. +*/ +int sqlite3_blob_bytes(sqlite3_blob *); + +/* +** CAPI3REF: Read Data From A BLOB Incrementally +** +** This function is used to read data from an open +** [sqlite3_blob | blob-handle] into a caller supplied buffer. +** n bytes of data are copied into buffer +** z from the open blob, starting at offset iOffset. +** +** On success, SQLITE_OK is returned. Otherwise, an +** [SQLITE_ERROR | SQLite error code] or an +** [SQLITE_IOERR_READ | extended error code] is returned. +*/ +int sqlite3_blob_read(sqlite3_blob *, void *z, int n, int iOffset); + +/* +** CAPI3REF: Write Data Into A BLOB Incrementally +** +** This function is used to write data into an open +** [sqlite3_blob | blob-handle] from a user supplied buffer. +** n bytes of data are copied from the buffer +** pointed to by z into the open blob, starting at offset iOffset. +** +** If the [sqlite3_blob | blob-handle] passed as the first argument +** was not opened for writing (the flags parameter to [sqlite3_blob_open()] +*** was zero), this function returns [SQLITE_READONLY]. +** +** This function may only modify the contents of the blob, it is +** not possible to increase the size of a blob using this API. If +** offset iOffset is less than n bytes from the end of the blob, +** [SQLITE_ERROR] is returned and no data is written. +** +** On success, SQLITE_OK is returned. Otherwise, an +** [SQLITE_ERROR | SQLite error code] or an +** [SQLITE_IOERR_READ | extended error code] is returned. +*/ +int sqlite3_blob_write(sqlite3_blob *, const void *z, int n, int iOffset); + +/* +** CAPI3REF: Virtual File System Objects +** +** A virtual filesystem (VFS) is an [sqlite3_vfs] object +** that SQLite uses to interact +** with the underlying operating system. Most builds come with a +** single default VFS that is appropriate for the host computer. +** New VFSes can be registered and existing VFSes can be unregistered. +** The following interfaces are provided. +** +** The sqlite3_vfs_find() interface returns a pointer to a VFS given its +** name. Names are case sensitive. If there is no match, a NULL +** pointer is returned. If zVfsName is NULL then the default +** VFS is returned. +** +** New VFSes are registered with sqlite3_vfs_register(). Each +** new VFS becomes the default VFS if the makeDflt flag is set. +** The same VFS can be registered multiple times without injury. +** To make an existing VFS into the default VFS, register it again +** with the makeDflt flag set. If two different VFSes with the +** same name are registered, the behavior is undefined. 
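A sketch of the incremental-blob routines declared above, reading one whole blob into a heap buffer. The table and column names are hypothetical, and flags==0 opens the handle read-only as described in the blob_open comment.

```c
#include <stdlib.h>
#include "sqlite3.h"

/* Read the blob stored in docs.body at rowid iRow into *pzOut (malloc'd,
** caller frees), writing its size to *pnOut. */
static int read_whole_blob(sqlite3 *db, sqlite3_int64 iRow,
                           void **pzOut, int *pnOut){
  sqlite3_blob *pBlob = 0;
  int rc = sqlite3_blob_open(db, "main", "docs", "body", iRow, 0, &pBlob);
  if( rc!=SQLITE_OK ) return rc;
  *pnOut = sqlite3_blob_bytes(pBlob);
  *pzOut = malloc(*pnOut>0 ? *pnOut : 1);
  if( *pzOut==0 ){
    rc = SQLITE_NOMEM;
  }else{
    rc = sqlite3_blob_read(pBlob, *pzOut, *pnOut, 0);  /* offset 0, whole blob */
  }
  sqlite3_blob_close(pBlob);
  return rc;
}
```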
If a +** VFS is registered with a name that is NULL or an empty string, +** then the behavior is undefined. +** +** Unregister a VFS with the sqlite3_vfs_unregister() interface. +** If the default VFS is unregistered, another VFS is chosen as +** the default. The choice for the new VFS is arbitrary. +*/ +sqlite3_vfs *sqlite3_vfs_find(const char *zVfsName); +int sqlite3_vfs_register(sqlite3_vfs*, int makeDflt); +int sqlite3_vfs_unregister(sqlite3_vfs*); + +/* +** CAPI3REF: Mutexes +** +** The SQLite core uses these routines for thread +** synchronization. Though they are intended for internal +** use by SQLite, code that links against SQLite is +** permitted to use any of these routines. +** +** The SQLite source code contains multiple implementations +** of these mutex routines. An appropriate implementation +** is selected automatically at compile-time. The following +** implementations are available in the SQLite core: +** +**
+**   • SQLITE_MUTEX_OS2
+**   • SQLITE_MUTEX_PTHREAD
+**   • SQLITE_MUTEX_W32
+**   • SQLITE_MUTEX_NOOP
+** +** The SQLITE_MUTEX_NOOP implementation is a set of routines +** that does no real locking and is appropriate for use in +** a single-threaded application. The SQLITE_MUTEX_OS2, +** SQLITE_MUTEX_PTHREAD, and SQLITE_MUTEX_W32 implementations +** are appropriate for use on os/2, unix, and windows. +** +** If SQLite is compiled with the SQLITE_MUTEX_APPDEF preprocessor +** macro defined (with "-DSQLITE_MUTEX_APPDEF=1"), then no mutex +** implementation is included with the library. The +** mutex interface routines defined here become external +** references in the SQLite library for which implementations +** must be provided by the application. This facility allows an +** application that links against SQLite to provide its own mutex +** implementation without having to modify the SQLite core. +** +** The sqlite3_mutex_alloc() routine allocates a new +** mutex and returns a pointer to it. If it returns NULL +** that means that a mutex could not be allocated. SQLite +** will unwind its stack and return an error. The argument +** to sqlite3_mutex_alloc() is one of these integer constants: +** +**
+**   • SQLITE_MUTEX_FAST
+**   • SQLITE_MUTEX_RECURSIVE
+**   • SQLITE_MUTEX_STATIC_MASTER
+**   • SQLITE_MUTEX_STATIC_MEM
+**   • SQLITE_MUTEX_STATIC_MEM2
+**   • SQLITE_MUTEX_STATIC_PRNG
+**   • SQLITE_MUTEX_STATIC_LRU
+** +** The first two constants cause sqlite3_mutex_alloc() to create +** a new mutex. The new mutex is recursive when SQLITE_MUTEX_RECURSIVE +** is used but not necessarily so when SQLITE_MUTEX_FAST is used. +** The mutex implementation does not need to make a distinction +** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does +** not want to. But SQLite will only request a recursive mutex in +** cases where it really needs one. If a faster non-recursive mutex +** implementation is available on the host platform, the mutex subsystem +** might return such a mutex in response to SQLITE_MUTEX_FAST. +** +** The other allowed parameters to sqlite3_mutex_alloc() each return +** a pointer to a static preexisting mutex. Four static mutexes are +** used by the current version of SQLite. Future versions of SQLite +** may add additional static mutexes. Static mutexes are for internal +** use by SQLite only. Applications that use SQLite mutexes should +** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or +** SQLITE_MUTEX_RECURSIVE. +** +** Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST +** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc() +** returns a different mutex on every call. But for the static +** mutex types, the same mutex is returned on every call that has +** the same type number. +** +** The sqlite3_mutex_free() routine deallocates a previously +** allocated dynamic mutex. SQLite is careful to deallocate every +** dynamic mutex that it allocates. The dynamic mutexes must not be in +** use when they are deallocated. Attempting to deallocate a static +** mutex results in undefined behavior. SQLite never deallocates +** a static mutex. +** +** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt +** to enter a mutex. If another thread is already within the mutex, +** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return +** SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK +** upon successful entry. Mutexes created using SQLITE_MUTEX_RECURSIVE can +** be entered multiple times by the same thread. In such cases the, +** mutex must be exited an equal number of times before another thread +** can enter. If the same thread tries to enter any other kind of mutex +** more than once, the behavior is undefined. SQLite will never exhibit +** such behavior in its own use of mutexes. +** +** Some systems (ex: windows95) do not the operation implemented by +** sqlite3_mutex_try(). On those systems, sqlite3_mutex_try() will +** always return SQLITE_BUSY. The SQLite core only ever uses +** sqlite3_mutex_try() as an optimization so this is acceptable behavior. +** +** The sqlite3_mutex_leave() routine exits a mutex that was +** previously entered by the same thread. The behavior +** is undefined if the mutex is not currently entered by the +** calling thread or is not currently allocated. SQLite will +** never do either. +** +** See also: [sqlite3_mutex_held()] and [sqlite3_mutex_notheld()]. +*/ +sqlite3_mutex *sqlite3_mutex_alloc(int); +void sqlite3_mutex_free(sqlite3_mutex*); +void sqlite3_mutex_enter(sqlite3_mutex*); +int sqlite3_mutex_try(sqlite3_mutex*); +void sqlite3_mutex_leave(sqlite3_mutex*); + +/* +** CAPI3REF: Mutex Verifcation Routines +** +** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routines +** are intended for use inside assert() statements. The SQLite core +** never uses these routines except inside an assert() and applications +** are advised to follow the lead of the core. 
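A minimal sketch of how application code might use the dynamic mutexes described above to protect its own shared state. The counter, and the assumption that initialization runs before any other thread starts, are both illustrative.

```c
#include "sqlite3.h"

static sqlite3_mutex *gAppMutex = 0;   /* dynamic SQLITE_MUTEX_FAST mutex */
static int gCounter = 0;               /* state shared between threads */

/* Call once, before any other thread can reach bump_counter(). */
static int init_app_mutex(void){
  gAppMutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
  return gAppMutex ? SQLITE_OK : SQLITE_NOMEM;
}

static void bump_counter(void){
  sqlite3_mutex_enter(gAppMutex);   /* blocks until the mutex is available */
  gCounter++;                       /* protected region */
  sqlite3_mutex_leave(gAppMutex);
}
```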
The core only +** provides implementations for these routines when it is compiled +** with the SQLITE_DEBUG flag. External mutex implementations +** are only required to provide these routines if SQLITE_DEBUG is +** defined and if NDEBUG is not defined. +** +** These routines should return true if the mutex in their argument +** is held or not held, respectively, by the calling thread. +** +** The implementation is not required to provided versions of these +** routines that actually work. +** If the implementation does not provide working +** versions of these routines, it should at least provide stubs +** that always return true so that one does not get spurious +** assertion failures. +** +** If the argument to sqlite3_mutex_held() is a NULL pointer then +** the routine should return 1. This seems counter-intuitive since +** clearly the mutex cannot be held if it does not exist. But the +** the reason the mutex does not exist is because the build is not +** using mutexes. And we do not want the assert() containing the +** call to sqlite3_mutex_held() to fail, so a non-zero return is +** the appropriate thing to do. The sqlite3_mutex_notheld() +** interface should also return 1 when given a NULL pointer. +*/ +int sqlite3_mutex_held(sqlite3_mutex*); +int sqlite3_mutex_notheld(sqlite3_mutex*); + +/* +** CAPI3REF: Mutex Types +** +** The [sqlite3_mutex_alloc()] interface takes a single argument +** which is one of these integer constants. +*/ +#define SQLITE_MUTEX_FAST 0 +#define SQLITE_MUTEX_RECURSIVE 1 +#define SQLITE_MUTEX_STATIC_MASTER 2 +#define SQLITE_MUTEX_STATIC_MEM 3 /* sqlite3_malloc() */ +#define SQLITE_MUTEX_STATIC_MEM2 4 /* sqlite3_release_memory() */ +#define SQLITE_MUTEX_STATIC_PRNG 5 /* sqlite3_random() */ +#define SQLITE_MUTEX_STATIC_LRU 6 /* lru page list */ + +/* +** CAPI3REF: Low-Level Control Of Database Files +** +** The [sqlite3_file_control()] interface makes a direct call to the +** xFileControl method for the [sqlite3_io_methods] object associated +** with a particular database identified by the second argument. The +** name of the database is the name assigned to the database by the +** ATTACH SQL command that opened the +** database. To control the main database file, use the name "main" +** or a NULL pointer. The third and fourth parameters to this routine +** are passed directly through to the second and third parameters of +** the xFileControl method. The return value of the xFileControl +** method becomes the return value of this routine. +** +** If the second parameter (zDbName) does not match the name of any +** open database file, then SQLITE_ERROR is returned. This error +** code is not remembered and will not be recalled by [sqlite3_errcode()] +** or [sqlite3_errmsg()]. The underlying xFileControl method might +** also return SQLITE_ERROR. There is no way to distinguish between +** an incorrect zDbName and an SQLITE_ERROR return from the underlying +** xFileControl method. +** +** See also: [SQLITE_FCNTL_LOCKSTATE] +*/ +int sqlite3_file_control(sqlite3*, const char *zDbName, int op, void*); + +/* +** Undo the hack that converts floating point types to integer for +** builds on processors without floating point support. 
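A hedged sketch of sqlite3_file_control(), using the SQLITE_FCNTL_LOCKSTATE opcode that the comment above points to. The integer written into iState is whatever the underlying xFileControl method chooses to report; for the built-in VFSes it is the current lock level of the file.

```c
#include <stdio.h>
#include "sqlite3.h"

/* Query the xFileControl method of the VFS for the "main" database. */
static void show_lock_state(sqlite3 *db){
  int iState = 0;
  int rc = sqlite3_file_control(db, "main", SQLITE_FCNTL_LOCKSTATE, &iState);
  if( rc==SQLITE_OK ){
    printf("lock state = %d\n", iState);
  }else{
    printf("file control failed: %d\n", rc);  /* e.g. unknown database name */
  }
}
```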
+*/ +#ifdef SQLITE_OMIT_FLOATING_POINT +# undef double +#endif + +#ifdef __cplusplus +} /* End of the 'extern "C"' block */ +#endif +#endif diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/sqlite3ext.h b/libraries/sqlite/unix/sqlite-3.5.1/src/sqlite3ext.h new file mode 100644 index 0000000000..5d4c2dec96 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/sqlite3ext.h @@ -0,0 +1,350 @@ +/* +** 2006 June 7 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This header file defines the SQLite interface for use by +** shared libraries that want to be imported as extensions into +** an SQLite instance. Shared libraries that intend to be loaded +** as extensions by SQLite should #include this file instead of +** sqlite3.h. +** +** @(#) $Id: sqlite3ext.h,v 1.17 2007/08/31 16:11:36 drh Exp $ +*/ +#ifndef _SQLITE3EXT_H_ +#define _SQLITE3EXT_H_ +#include "sqlite3.h" + +typedef struct sqlite3_api_routines sqlite3_api_routines; + +/* +** The following structure hold pointers to all of the SQLite API +** routines. +** +** WARNING: In order to maintain backwards compatibility, add new +** interfaces to the end of this structure only. If you insert new +** interfaces in the middle of this structure, then older different +** versions of SQLite will not be able to load each others shared +** libraries! +*/ +struct sqlite3_api_routines { + void * (*aggregate_context)(sqlite3_context*,int nBytes); + int (*aggregate_count)(sqlite3_context*); + int (*bind_blob)(sqlite3_stmt*,int,const void*,int n,void(*)(void*)); + int (*bind_double)(sqlite3_stmt*,int,double); + int (*bind_int)(sqlite3_stmt*,int,int); + int (*bind_int64)(sqlite3_stmt*,int,sqlite_int64); + int (*bind_null)(sqlite3_stmt*,int); + int (*bind_parameter_count)(sqlite3_stmt*); + int (*bind_parameter_index)(sqlite3_stmt*,const char*zName); + const char * (*bind_parameter_name)(sqlite3_stmt*,int); + int (*bind_text)(sqlite3_stmt*,int,const char*,int n,void(*)(void*)); + int (*bind_text16)(sqlite3_stmt*,int,const void*,int,void(*)(void*)); + int (*bind_value)(sqlite3_stmt*,int,const sqlite3_value*); + int (*busy_handler)(sqlite3*,int(*)(void*,int),void*); + int (*busy_timeout)(sqlite3*,int ms); + int (*changes)(sqlite3*); + int (*close)(sqlite3*); + int (*collation_needed)(sqlite3*,void*,void(*)(void*,sqlite3*,int eTextRep,const char*)); + int (*collation_needed16)(sqlite3*,void*,void(*)(void*,sqlite3*,int eTextRep,const void*)); + const void * (*column_blob)(sqlite3_stmt*,int iCol); + int (*column_bytes)(sqlite3_stmt*,int iCol); + int (*column_bytes16)(sqlite3_stmt*,int iCol); + int (*column_count)(sqlite3_stmt*pStmt); + const char * (*column_database_name)(sqlite3_stmt*,int); + const void * (*column_database_name16)(sqlite3_stmt*,int); + const char * (*column_decltype)(sqlite3_stmt*,int i); + const void * (*column_decltype16)(sqlite3_stmt*,int); + double (*column_double)(sqlite3_stmt*,int iCol); + int (*column_int)(sqlite3_stmt*,int iCol); + sqlite_int64 (*column_int64)(sqlite3_stmt*,int iCol); + const char * (*column_name)(sqlite3_stmt*,int); + const void * (*column_name16)(sqlite3_stmt*,int); + const char * (*column_origin_name)(sqlite3_stmt*,int); + const void * (*column_origin_name16)(sqlite3_stmt*,int); + const char * 
(*column_table_name)(sqlite3_stmt*,int); + const void * (*column_table_name16)(sqlite3_stmt*,int); + const unsigned char * (*column_text)(sqlite3_stmt*,int iCol); + const void * (*column_text16)(sqlite3_stmt*,int iCol); + int (*column_type)(sqlite3_stmt*,int iCol); + sqlite3_value* (*column_value)(sqlite3_stmt*,int iCol); + void * (*commit_hook)(sqlite3*,int(*)(void*),void*); + int (*complete)(const char*sql); + int (*complete16)(const void*sql); + int (*create_collation)(sqlite3*,const char*,int,void*,int(*)(void*,int,const void*,int,const void*)); + int (*create_collation16)(sqlite3*,const char*,int,void*,int(*)(void*,int,const void*,int,const void*)); + int (*create_function)(sqlite3*,const char*,int,int,void*,void (*xFunc)(sqlite3_context*,int,sqlite3_value**),void (*xStep)(sqlite3_context*,int,sqlite3_value**),void (*xFinal)(sqlite3_context*)); + int (*create_function16)(sqlite3*,const void*,int,int,void*,void (*xFunc)(sqlite3_context*,int,sqlite3_value**),void (*xStep)(sqlite3_context*,int,sqlite3_value**),void (*xFinal)(sqlite3_context*)); + int (*create_module)(sqlite3*,const char*,const sqlite3_module*,void*); + int (*data_count)(sqlite3_stmt*pStmt); + sqlite3 * (*db_handle)(sqlite3_stmt*); + int (*declare_vtab)(sqlite3*,const char*); + int (*enable_shared_cache)(int); + int (*errcode)(sqlite3*db); + const char * (*errmsg)(sqlite3*); + const void * (*errmsg16)(sqlite3*); + int (*exec)(sqlite3*,const char*,sqlite3_callback,void*,char**); + int (*expired)(sqlite3_stmt*); + int (*finalize)(sqlite3_stmt*pStmt); + void (*free)(void*); + void (*free_table)(char**result); + int (*get_autocommit)(sqlite3*); + void * (*get_auxdata)(sqlite3_context*,int); + int (*get_table)(sqlite3*,const char*,char***,int*,int*,char**); + int (*global_recover)(void); + void (*interruptx)(sqlite3*); + sqlite_int64 (*last_insert_rowid)(sqlite3*); + const char * (*libversion)(void); + int (*libversion_number)(void); + void *(*malloc)(int); + char * (*mprintf)(const char*,...); + int (*open)(const char*,sqlite3**); + int (*open16)(const void*,sqlite3**); + int (*prepare)(sqlite3*,const char*,int,sqlite3_stmt**,const char**); + int (*prepare16)(sqlite3*,const void*,int,sqlite3_stmt**,const void**); + void * (*profile)(sqlite3*,void(*)(void*,const char*,sqlite_uint64),void*); + void (*progress_handler)(sqlite3*,int,int(*)(void*),void*); + void *(*realloc)(void*,int); + int (*reset)(sqlite3_stmt*pStmt); + void (*result_blob)(sqlite3_context*,const void*,int,void(*)(void*)); + void (*result_double)(sqlite3_context*,double); + void (*result_error)(sqlite3_context*,const char*,int); + void (*result_error16)(sqlite3_context*,const void*,int); + void (*result_int)(sqlite3_context*,int); + void (*result_int64)(sqlite3_context*,sqlite_int64); + void (*result_null)(sqlite3_context*); + void (*result_text)(sqlite3_context*,const char*,int,void(*)(void*)); + void (*result_text16)(sqlite3_context*,const void*,int,void(*)(void*)); + void (*result_text16be)(sqlite3_context*,const void*,int,void(*)(void*)); + void (*result_text16le)(sqlite3_context*,const void*,int,void(*)(void*)); + void (*result_value)(sqlite3_context*,sqlite3_value*); + void * (*rollback_hook)(sqlite3*,void(*)(void*),void*); + int (*set_authorizer)(sqlite3*,int(*)(void*,int,const char*,const char*,const char*,const char*),void*); + void (*set_auxdata)(sqlite3_context*,int,void*,void (*)(void*)); + char * (*snprintf)(int,char*,const char*,...); + int (*step)(sqlite3_stmt*); + int (*table_column_metadata)(sqlite3*,const char*,const char*,const char*,char 
const**,char const**,int*,int*,int*); + void (*thread_cleanup)(void); + int (*total_changes)(sqlite3*); + void * (*trace)(sqlite3*,void(*xTrace)(void*,const char*),void*); + int (*transfer_bindings)(sqlite3_stmt*,sqlite3_stmt*); + void * (*update_hook)(sqlite3*,void(*)(void*,int ,char const*,char const*,sqlite_int64),void*); + void * (*user_data)(sqlite3_context*); + const void * (*value_blob)(sqlite3_value*); + int (*value_bytes)(sqlite3_value*); + int (*value_bytes16)(sqlite3_value*); + double (*value_double)(sqlite3_value*); + int (*value_int)(sqlite3_value*); + sqlite_int64 (*value_int64)(sqlite3_value*); + int (*value_numeric_type)(sqlite3_value*); + const unsigned char * (*value_text)(sqlite3_value*); + const void * (*value_text16)(sqlite3_value*); + const void * (*value_text16be)(sqlite3_value*); + const void * (*value_text16le)(sqlite3_value*); + int (*value_type)(sqlite3_value*); + char *(*vmprintf)(const char*,va_list); + /* Added ??? */ + int (*overload_function)(sqlite3*, const char *zFuncName, int nArg); + /* Added by 3.3.13 */ + int (*prepare_v2)(sqlite3*,const char*,int,sqlite3_stmt**,const char**); + int (*prepare16_v2)(sqlite3*,const void*,int,sqlite3_stmt**,const void**); + int (*clear_bindings)(sqlite3_stmt*); + /* Added by 3.4.1 */ + int (*create_module_v2)(sqlite3*,const char*,const sqlite3_module*,void*,void (*xDestroy)(void *)); + /* Added by 3.5.0 */ + int (*bind_zeroblob)(sqlite3_stmt*,int,int); + int (*blob_bytes)(sqlite3_blob*); + int (*blob_close)(sqlite3_blob*); + int (*blob_open)(sqlite3*,const char*,const char*,const char*,sqlite3_int64,int,sqlite3_blob**); + int (*blob_read)(sqlite3_blob*,void*,int,int); + int (*blob_write)(sqlite3_blob*,const void*,int,int); + int (*create_collation_v2)(sqlite3*,const char*,int,void*,int(*)(void*,int,const void*,int,const void*),void(*)(void*)); + int (*file_control)(sqlite3*,const char*,int,void*); + sqlite3_int64 (*memory_highwater)(int); + sqlite3_int64 (*memory_used)(void); + sqlite3_mutex *(*mutex_alloc)(int); + void (*mutex_enter)(sqlite3_mutex*); + void (*mutex_free)(sqlite3_mutex*); + void (*mutex_leave)(sqlite3_mutex*); + int (*mutex_try)(sqlite3_mutex*); + int (*open_v2)(const char*,sqlite3**,int,const char*); + int (*release_memory)(int); + void (*result_error_nomem)(sqlite3_context*); + void (*result_error_toobig)(sqlite3_context*); + int (*sleep)(int); + void (*soft_heap_limit)(int); + sqlite3_vfs *(*vfs_find)(const char*); + int (*vfs_register)(sqlite3_vfs*,int); + int (*vfs_unregister)(sqlite3_vfs*); +}; + +/* +** The following macros redefine the API routines so that they are +** redirected throught the global sqlite3_api structure. +** +** This header file is also used by the loadext.c source file +** (part of the main SQLite library - not an extension) so that +** it can get access to the sqlite3_api_routines structure +** definition. But the main library does not want to redefine +** the API. So the redefinition macros are only valid if the +** SQLITE_CORE macros is undefined. 
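+**
+** As a rough sketch of typical usage (the entry-point name below is only
+** illustrative, not prescribed by this header), a loadable extension
+** includes this file, uses SQLITE_EXTENSION_INIT1 at file scope to declare
+** the sqlite3_api pointer, and then, inside its entry point, uses
+** SQLITE_EXTENSION_INIT2() to point sqlite3_api at the table passed in by
+** the host library:
+**
+**     #include "sqlite3ext.h"
+**     SQLITE_EXTENSION_INIT1
+**
+**     int my_extension_init(
+**       sqlite3 *db,
+**       char **pzErrMsg,
+**       const sqlite3_api_routines *pApi
+**     ){
+**       SQLITE_EXTENSION_INIT2(pApi)
+**       return SQLITE_OK;
+**     }
+**
+** After that assignment, each sqlite3_xxx() call made by the extension
+** resolves, via the macros below, to the corresponding member of the
+** sqlite3_api_routines table supplied by the host.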
+*/ +#ifndef SQLITE_CORE +#define sqlite3_aggregate_context sqlite3_api->aggregate_context +#define sqlite3_aggregate_count sqlite3_api->aggregate_count +#define sqlite3_bind_blob sqlite3_api->bind_blob +#define sqlite3_bind_double sqlite3_api->bind_double +#define sqlite3_bind_int sqlite3_api->bind_int +#define sqlite3_bind_int64 sqlite3_api->bind_int64 +#define sqlite3_bind_null sqlite3_api->bind_null +#define sqlite3_bind_parameter_count sqlite3_api->bind_parameter_count +#define sqlite3_bind_parameter_index sqlite3_api->bind_parameter_index +#define sqlite3_bind_parameter_name sqlite3_api->bind_parameter_name +#define sqlite3_bind_text sqlite3_api->bind_text +#define sqlite3_bind_text16 sqlite3_api->bind_text16 +#define sqlite3_bind_value sqlite3_api->bind_value +#define sqlite3_busy_handler sqlite3_api->busy_handler +#define sqlite3_busy_timeout sqlite3_api->busy_timeout +#define sqlite3_changes sqlite3_api->changes +#define sqlite3_close sqlite3_api->close +#define sqlite3_collation_needed sqlite3_api->collation_needed +#define sqlite3_collation_needed16 sqlite3_api->collation_needed16 +#define sqlite3_column_blob sqlite3_api->column_blob +#define sqlite3_column_bytes sqlite3_api->column_bytes +#define sqlite3_column_bytes16 sqlite3_api->column_bytes16 +#define sqlite3_column_count sqlite3_api->column_count +#define sqlite3_column_database_name sqlite3_api->column_database_name +#define sqlite3_column_database_name16 sqlite3_api->column_database_name16 +#define sqlite3_column_decltype sqlite3_api->column_decltype +#define sqlite3_column_decltype16 sqlite3_api->column_decltype16 +#define sqlite3_column_double sqlite3_api->column_double +#define sqlite3_column_int sqlite3_api->column_int +#define sqlite3_column_int64 sqlite3_api->column_int64 +#define sqlite3_column_name sqlite3_api->column_name +#define sqlite3_column_name16 sqlite3_api->column_name16 +#define sqlite3_column_origin_name sqlite3_api->column_origin_name +#define sqlite3_column_origin_name16 sqlite3_api->column_origin_name16 +#define sqlite3_column_table_name sqlite3_api->column_table_name +#define sqlite3_column_table_name16 sqlite3_api->column_table_name16 +#define sqlite3_column_text sqlite3_api->column_text +#define sqlite3_column_text16 sqlite3_api->column_text16 +#define sqlite3_column_type sqlite3_api->column_type +#define sqlite3_column_value sqlite3_api->column_value +#define sqlite3_commit_hook sqlite3_api->commit_hook +#define sqlite3_complete sqlite3_api->complete +#define sqlite3_complete16 sqlite3_api->complete16 +#define sqlite3_create_collation sqlite3_api->create_collation +#define sqlite3_create_collation16 sqlite3_api->create_collation16 +#define sqlite3_create_function sqlite3_api->create_function +#define sqlite3_create_function16 sqlite3_api->create_function16 +#define sqlite3_create_module sqlite3_api->create_module +#define sqlite3_create_module_v2 sqlite3_api->create_module_v2 +#define sqlite3_data_count sqlite3_api->data_count +#define sqlite3_db_handle sqlite3_api->db_handle +#define sqlite3_declare_vtab sqlite3_api->declare_vtab +#define sqlite3_enable_shared_cache sqlite3_api->enable_shared_cache +#define sqlite3_errcode sqlite3_api->errcode +#define sqlite3_errmsg sqlite3_api->errmsg +#define sqlite3_errmsg16 sqlite3_api->errmsg16 +#define sqlite3_exec sqlite3_api->exec +#define sqlite3_expired sqlite3_api->expired +#define sqlite3_finalize sqlite3_api->finalize +#define sqlite3_free sqlite3_api->free +#define sqlite3_free_table sqlite3_api->free_table +#define sqlite3_get_autocommit 
sqlite3_api->get_autocommit +#define sqlite3_get_auxdata sqlite3_api->get_auxdata +#define sqlite3_get_table sqlite3_api->get_table +#define sqlite3_global_recover sqlite3_api->global_recover +#define sqlite3_interrupt sqlite3_api->interruptx +#define sqlite3_last_insert_rowid sqlite3_api->last_insert_rowid +#define sqlite3_libversion sqlite3_api->libversion +#define sqlite3_libversion_number sqlite3_api->libversion_number +#define sqlite3_malloc sqlite3_api->malloc +#define sqlite3_mprintf sqlite3_api->mprintf +#define sqlite3_open sqlite3_api->open +#define sqlite3_open16 sqlite3_api->open16 +#define sqlite3_prepare sqlite3_api->prepare +#define sqlite3_prepare16 sqlite3_api->prepare16 +#define sqlite3_prepare_v2 sqlite3_api->prepare_v2 +#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2 +#define sqlite3_profile sqlite3_api->profile +#define sqlite3_progress_handler sqlite3_api->progress_handler +#define sqlite3_realloc sqlite3_api->realloc +#define sqlite3_reset sqlite3_api->reset +#define sqlite3_result_blob sqlite3_api->result_blob +#define sqlite3_result_double sqlite3_api->result_double +#define sqlite3_result_error sqlite3_api->result_error +#define sqlite3_result_error16 sqlite3_api->result_error16 +#define sqlite3_result_int sqlite3_api->result_int +#define sqlite3_result_int64 sqlite3_api->result_int64 +#define sqlite3_result_null sqlite3_api->result_null +#define sqlite3_result_text sqlite3_api->result_text +#define sqlite3_result_text16 sqlite3_api->result_text16 +#define sqlite3_result_text16be sqlite3_api->result_text16be +#define sqlite3_result_text16le sqlite3_api->result_text16le +#define sqlite3_result_value sqlite3_api->result_value +#define sqlite3_rollback_hook sqlite3_api->rollback_hook +#define sqlite3_set_authorizer sqlite3_api->set_authorizer +#define sqlite3_set_auxdata sqlite3_api->set_auxdata +#define sqlite3_snprintf sqlite3_api->snprintf +#define sqlite3_step sqlite3_api->step +#define sqlite3_table_column_metadata sqlite3_api->table_column_metadata +#define sqlite3_thread_cleanup sqlite3_api->thread_cleanup +#define sqlite3_total_changes sqlite3_api->total_changes +#define sqlite3_trace sqlite3_api->trace +#define sqlite3_transfer_bindings sqlite3_api->transfer_bindings +#define sqlite3_update_hook sqlite3_api->update_hook +#define sqlite3_user_data sqlite3_api->user_data +#define sqlite3_value_blob sqlite3_api->value_blob +#define sqlite3_value_bytes sqlite3_api->value_bytes +#define sqlite3_value_bytes16 sqlite3_api->value_bytes16 +#define sqlite3_value_double sqlite3_api->value_double +#define sqlite3_value_int sqlite3_api->value_int +#define sqlite3_value_int64 sqlite3_api->value_int64 +#define sqlite3_value_numeric_type sqlite3_api->value_numeric_type +#define sqlite3_value_text sqlite3_api->value_text +#define sqlite3_value_text16 sqlite3_api->value_text16 +#define sqlite3_value_text16be sqlite3_api->value_text16be +#define sqlite3_value_text16le sqlite3_api->value_text16le +#define sqlite3_value_type sqlite3_api->value_type +#define sqlite3_vmprintf sqlite3_api->vmprintf +#define sqlite3_overload_function sqlite3_api->overload_function +#define sqlite3_prepare_v2 sqlite3_api->prepare_v2 +#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2 +#define sqlite3_clear_bindings sqlite3_api->clear_bindings +#define sqlite3_bind_zeroblob sqlite3_api->bind_zeroblob +#define sqlite3_blob_bytes sqlite3_api->blob_bytes +#define sqlite3_blob_close sqlite3_api->blob_close +#define sqlite3_blob_open sqlite3_api->blob_open +#define sqlite3_blob_read 
sqlite3_api->blob_read +#define sqlite3_blob_write sqlite3_api->blob_write +#define sqlite3_create_collation_v2 sqlite3_api->create_collation_v2 +#define sqlite3_file_control sqlite3_api->file_control +#define sqlite3_memory_highwater sqlite3_api->memory_highwater +#define sqlite3_memory_used sqlite3_api->memory_used +#define sqlite3_mutex_alloc sqlite3_api->mutex_alloc +#define sqlite3_mutex_enter sqlite3_api->mutex_enter +#define sqlite3_mutex_free sqlite3_api->mutex_free +#define sqlite3_mutex_leave sqlite3_api->mutex_leave +#define sqlite3_mutex_try sqlite3_api->mutex_try +#define sqlite3_open_v2 sqlite3_api->open_v2 +#define sqlite3_release_memory sqlite3_api->release_memory +#define sqlite3_result_error_nomem sqlite3_api->result_error_nomem +#define sqlite3_result_error_toobig sqlite3_api->result_error_toobig +#define sqlite3_sleep sqlite3_api->sleep +#define sqlite3_soft_heap_limit sqlite3_api->soft_heap_limit +#define sqlite3_vfs_find sqlite3_api->vfs_find +#define sqlite3_vfs_register sqlite3_api->vfs_register +#define sqlite3_vfs_unregister sqlite3_api->vfs_unregister +#endif /* SQLITE_CORE */ + +#define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api; +#define SQLITE_EXTENSION_INIT2(v) sqlite3_api = v; + +#endif /* _SQLITE3EXT_H_ */ diff --git a/libraries/sqlite/unix/sqlite-3.5.1/src/sqliteInt.h b/libraries/sqlite/unix/sqlite-3.5.1/src/sqliteInt.h new file mode 100644 index 0000000000..2170e53912 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/src/sqliteInt.h @@ -0,0 +1,1972 @@ +/* +** 2001 September 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** Internal interface definitions for SQLite. +** +** @(#) $Id: sqliteInt.h,v 1.613 2007/10/03 08:46:45 danielk1977 Exp $ +*/ +#ifndef _SQLITEINT_H_ +#define _SQLITEINT_H_ +#include "sqliteLimit.h" + +/* +** For testing purposes, the various size limit constants are really +** variables that we can modify in the testfixture. 
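+** For example, a SQLITE_TEST build can lower a limit at run time, roughly
+** "sqlite3MAX_PAGE_COUNT = 10;", to exercise error paths that are hard to
+** reach with the production limits.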
+*/ +#ifdef SQLITE_TEST + #undef SQLITE_MAX_LENGTH + #undef SQLITE_MAX_COLUMN + #undef SQLITE_MAX_SQL_LENGTH + #undef SQLITE_MAX_EXPR_DEPTH + #undef SQLITE_MAX_COMPOUND_SELECT + #undef SQLITE_MAX_VDBE_OP + #undef SQLITE_MAX_FUNCTION_ARG + #undef SQLITE_MAX_VARIABLE_NUMBER + #undef SQLITE_MAX_PAGE_SIZE + #undef SQLITE_MAX_PAGE_COUNT + #undef SQLITE_MAX_LIKE_PATTERN_LENGTH + + #define SQLITE_MAX_LENGTH sqlite3MAX_LENGTH + #define SQLITE_MAX_COLUMN sqlite3MAX_COLUMN + #define SQLITE_MAX_SQL_LENGTH sqlite3MAX_SQL_LENGTH + #define SQLITE_MAX_EXPR_DEPTH sqlite3MAX_EXPR_DEPTH + #define SQLITE_MAX_COMPOUND_SELECT sqlite3MAX_COMPOUND_SELECT + #define SQLITE_MAX_VDBE_OP sqlite3MAX_VDBE_OP + #define SQLITE_MAX_FUNCTION_ARG sqlite3MAX_FUNCTION_ARG + #define SQLITE_MAX_VARIABLE_NUMBER sqlite3MAX_VARIABLE_NUMBER + #define SQLITE_MAX_PAGE_SIZE sqlite3MAX_PAGE_SIZE + #define SQLITE_MAX_PAGE_COUNT sqlite3MAX_PAGE_COUNT + #define SQLITE_MAX_LIKE_PATTERN_LENGTH sqlite3MAX_LIKE_PATTERN_LENGTH + + extern int sqlite3MAX_LENGTH; + extern int sqlite3MAX_COLUMN; + extern int sqlite3MAX_SQL_LENGTH; + extern int sqlite3MAX_EXPR_DEPTH; + extern int sqlite3MAX_COMPOUND_SELECT; + extern int sqlite3MAX_VDBE_OP; + extern int sqlite3MAX_FUNCTION_ARG; + extern int sqlite3MAX_VARIABLE_NUMBER; + extern int sqlite3MAX_PAGE_SIZE; + extern int sqlite3MAX_PAGE_COUNT; + extern int sqlite3MAX_LIKE_PATTERN_LENGTH; +#endif + + +/* +** The SQLITE_THREADSAFE macro must be defined as either 0 or 1. +** Older versions of SQLite used an optional THREADSAFE macro. +** We support that for legacy +*/ +#if !defined(SQLITE_THREADSAFE) +#if defined(THREADSAFE) +# define SQLITE_THREADSAFE THREADSAFE +#else +# define SQLITE_THREADSAFE 1 +#endif +#endif + +/* +** We need to define _XOPEN_SOURCE as follows in order to enable +** recursive mutexes on most unix systems. But Mac OS X is different. +** The _XOPEN_SOURCE define causes problems for Mac OS X we are told, +** so it is omitted there. See ticket #2673. +** +** Later we learn that _XOPEN_SOURCE is poorly or incorrectly +** implemented on some systems. So we avoid defining it at all +** if it is already defined or if it is unneeded because we are +** not doing a threadsafe build. Ticket #2681. +*/ +#if !defined(_XOPEN_SOURCE) && !defined(__MACOS__) && SQLITE_THREADSAFE +# define _XOPEN_SOURCE 500 /* Needed to enable pthread recursive mutexes */ +#endif + +#if defined(SQLITE_TCL) || defined(TCLSH) +# include +#endif + +/* +** Many people are failing to set -DNDEBUG=1 when compiling SQLite. +** Setting NDEBUG makes the code smaller and run faster. So the following +** lines are added to automatically set NDEBUG unless the -DSQLITE_DEBUG=1 +** option is set. Thus NDEBUG becomes an opt-in rather than an opt-out +** feature. +*/ +#if !defined(NDEBUG) && !defined(SQLITE_DEBUG) +# define NDEBUG 1 +#endif + +/* +** These #defines should enable >2GB file support on Posix if the +** underlying operating system supports it. If the OS lacks +** large file support, or if the OS is windows, these should be no-ops. +** +** Large file support can be disabled using the -DSQLITE_DISABLE_LFS switch +** on the compiler command line. This is necessary if you are compiling +** on a recent machine (ex: RedHat 7.2) but you want your code to work +** on an older machine (ex: RedHat 6.0). If you compile on RedHat 7.2 +** without this option, LFS is enable. But LFS does not exist in the kernel +** in RedHat 6.0, so the code won't work. Hence, for maximum binary +** portability you should omit LFS. 
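+** (For instance, such a build might be compiled roughly as
+** "cc -DSQLITE_DISABLE_LFS ..." so that the large-file defines below are
+** skipped entirely.)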
+** +** Similar is true for MacOS. LFS is only supported on MacOS 9 and later. +*/ +#ifndef SQLITE_DISABLE_LFS +# define _LARGE_FILE 1 +# ifndef _FILE_OFFSET_BITS +# define _FILE_OFFSET_BITS 64 +# endif +# define _LARGEFILE_SOURCE 1 +#endif + +#include "sqlite3.h" +#include "hash.h" +#include "parse.h" +#include +#include +#include +#include +#include + +#define sqlite3_isnan(X) ((X)!=(X)) + +/* +** If compiling for a processor that lacks floating point support, +** substitute integer for floating-point +*/ +#ifdef SQLITE_OMIT_FLOATING_POINT +# define double sqlite_int64 +# define LONGDOUBLE_TYPE sqlite_int64 +# ifndef SQLITE_BIG_DBL +# define SQLITE_BIG_DBL (0x7fffffffffffffff) +# endif +# define SQLITE_OMIT_DATETIME_FUNCS 1 +# define SQLITE_OMIT_TRACE 1 +# undef SQLITE_MIXED_ENDIAN_64BIT_FLOAT +#endif +#ifndef SQLITE_BIG_DBL +# define SQLITE_BIG_DBL (1e99) +#endif + +/* +** OMIT_TEMPDB is set to 1 if SQLITE_OMIT_TEMPDB is defined, or 0 +** afterward. Having this macro allows us to cause the C compiler +** to omit code used by TEMP tables without messy #ifndef statements. +*/ +#ifdef SQLITE_OMIT_TEMPDB +#define OMIT_TEMPDB 1 +#else +#define OMIT_TEMPDB 0 +#endif + +/* +** If the following macro is set to 1, then NULL values are considered +** distinct when determining whether or not two entries are the same +** in a UNIQUE index. This is the way PostgreSQL, Oracle, DB2, MySQL, +** OCELOT, and Firebird all work. The SQL92 spec explicitly says this +** is the way things are suppose to work. +** +** If the following macro is set to 0, the NULLs are indistinct for +** a UNIQUE index. In this mode, you can only have a single NULL entry +** for a column declared UNIQUE. This is the way Informix and SQL Server +** work. +*/ +#define NULL_DISTINCT_FOR_UNIQUE 1 + +/* +** The "file format" number is an integer that is incremented whenever +** the VDBE-level file format changes. The following macros define the +** the default file format for new databases and the maximum file format +** that the library can read. +*/ +#define SQLITE_MAX_FILE_FORMAT 4 +#ifndef SQLITE_DEFAULT_FILE_FORMAT +# define SQLITE_DEFAULT_FILE_FORMAT 1 +#endif + +/* +** Provide a default value for TEMP_STORE in case it is not specified +** on the command-line +*/ +#ifndef TEMP_STORE +# define TEMP_STORE 1 +#endif + +/* +** GCC does not define the offsetof() macro so we'll have to do it +** ourselves. +*/ +#ifndef offsetof +#define offsetof(STRUCTURE,FIELD) ((int)((char*)&((STRUCTURE*)0)->FIELD)) +#endif + +/* +** Check to see if this machine uses EBCDIC. (Yes, believe it or +** not, there are still machines out there that use EBCDIC.) +*/ +#if 'A' == '\301' +# define SQLITE_EBCDIC 1 +#else +# define SQLITE_ASCII 1 +#endif + +/* +** Integers of known sizes. These typedefs might change for architectures +** where the sizes very. Preprocessor macros are available so that the +** types can be conveniently redefined at compile-type. Like this: +** +** cc '-DUINTPTR_TYPE=long long int' ... 
+*/ +#ifndef UINT32_TYPE +# define UINT32_TYPE unsigned int +#endif +#ifndef UINT16_TYPE +# define UINT16_TYPE unsigned short int +#endif +#ifndef INT16_TYPE +# define INT16_TYPE short int +#endif +#ifndef UINT8_TYPE +# define UINT8_TYPE unsigned char +#endif +#ifndef INT8_TYPE +# define INT8_TYPE signed char +#endif +#ifndef LONGDOUBLE_TYPE +# define LONGDOUBLE_TYPE long double +#endif +typedef sqlite_int64 i64; /* 8-byte signed integer */ +typedef sqlite_uint64 u64; /* 8-byte unsigned integer */ +typedef UINT32_TYPE u32; /* 4-byte unsigned integer */ +typedef UINT16_TYPE u16; /* 2-byte unsigned integer */ +typedef INT16_TYPE i16; /* 2-byte signed integer */ +typedef UINT8_TYPE u8; /* 1-byte unsigned integer */ +typedef UINT8_TYPE i8; /* 1-byte signed integer */ + +/* +** Macros to determine whether the machine is big or little endian, +** evaluated at runtime. +*/ +#ifdef SQLITE_AMALGAMATION +const int sqlite3One; +#else +extern const int sqlite3one; +#endif +#if defined(i386) || defined(__i386__) || defined(_M_IX86) +# define SQLITE_BIGENDIAN 0 +# define SQLITE_LITTLEENDIAN 1 +# define SQLITE_UTF16NATIVE SQLITE_UTF16LE +#else +# define SQLITE_BIGENDIAN (*(char *)(&sqlite3one)==0) +# define SQLITE_LITTLEENDIAN (*(char *)(&sqlite3one)==1) +# define SQLITE_UTF16NATIVE (SQLITE_BIGENDIAN?SQLITE_UTF16BE:SQLITE_UTF16LE) +#endif + +/* +** An instance of the following structure is used to store the busy-handler +** callback for a given sqlite handle. +** +** The sqlite.busyHandler member of the sqlite struct contains the busy +** callback for the database handle. Each pager opened via the sqlite +** handle is passed a pointer to sqlite.busyHandler. The busy-handler +** callback is currently invoked only from within pager.c. +*/ +typedef struct BusyHandler BusyHandler; +struct BusyHandler { + int (*xFunc)(void *,int); /* The busy callback */ + void *pArg; /* First arg to busy callback */ + int nBusy; /* Incremented with each busy call */ +}; + +/* +** Defer sourcing vdbe.h and btree.h until after the "u8" and +** "BusyHandler typedefs. +*/ +#include "btree.h" +#include "vdbe.h" +#include "pager.h" + + +/* +** Name of the master database table. The master database table +** is a special table that holds the names and attributes of all +** user tables and indices. +*/ +#define MASTER_NAME "sqlite_master" +#define TEMP_MASTER_NAME "sqlite_temp_master" + +/* +** The root-page of the master database table. +*/ +#define MASTER_ROOT 1 + +/* +** The name of the schema table. +*/ +#define SCHEMA_TABLE(x) ((!OMIT_TEMPDB)&&(x==1)?TEMP_MASTER_NAME:MASTER_NAME) + +/* +** A convenience macro that returns the number of elements in +** an array. 
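+** For example, given the member "Db aDbStatic[2]" in the sqlite3 structure
+** below, ArraySize(aDbStatic) evaluates to 2.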
+*/ +#define ArraySize(X) (sizeof(X)/sizeof(X[0])) + +/* +** Forward references to structures +*/ +typedef struct AggInfo AggInfo; +typedef struct AuthContext AuthContext; +typedef struct CollSeq CollSeq; +typedef struct Column Column; +typedef struct Db Db; +typedef struct Schema Schema; +typedef struct Expr Expr; +typedef struct ExprList ExprList; +typedef struct FKey FKey; +typedef struct FuncDef FuncDef; +typedef struct IdList IdList; +typedef struct Index Index; +typedef struct KeyClass KeyClass; +typedef struct KeyInfo KeyInfo; +typedef struct Module Module; +typedef struct NameContext NameContext; +typedef struct Parse Parse; +typedef struct Select Select; +typedef struct SrcList SrcList; +typedef struct Table Table; +typedef struct TableLock TableLock; +typedef struct Token Token; +typedef struct TriggerStack TriggerStack; +typedef struct TriggerStep TriggerStep; +typedef struct Trigger Trigger; +typedef struct WhereInfo WhereInfo; +typedef struct WhereLevel WhereLevel; + +#include "os.h" +#include "mutex.h" + +/* +** Each database file to be accessed by the system is an instance +** of the following structure. There are normally two of these structures +** in the sqlite.aDb[] array. aDb[0] is the main database file and +** aDb[1] is the database file used to hold temporary tables. Additional +** databases may be attached. +*/ +struct Db { + char *zName; /* Name of this database */ + Btree *pBt; /* The B*Tree structure for this database file */ + u8 inTrans; /* 0: not writable. 1: Transaction. 2: Checkpoint */ + u8 safety_level; /* How aggressive at synching data to disk */ + void *pAux; /* Auxiliary data. Usually NULL */ + void (*xFreeAux)(void*); /* Routine to free pAux */ + Schema *pSchema; /* Pointer to database schema (possibly shared) */ +}; + +/* +** An instance of the following structure stores a database schema. +** +** If there are no virtual tables configured in this schema, the +** Schema.db variable is set to NULL. After the first virtual table +** has been added, it is set to point to the database connection +** used to create the connection. Once a virtual table has been +** added to the Schema structure and the Schema.db variable populated, +** only that database connection may use the Schema to prepare +** statements. +*/ +struct Schema { + int schema_cookie; /* Database schema version number for this file */ + Hash tblHash; /* All tables indexed by name */ + Hash idxHash; /* All (named) indices indexed by name */ + Hash trigHash; /* All triggers indexed by name */ + Hash aFKey; /* Foreign keys indexed by to-table */ + Table *pSeqTab; /* The sqlite_sequence table used by AUTOINCREMENT */ + u8 file_format; /* Schema format version for this file */ + u8 enc; /* Text encoding used by this database */ + u16 flags; /* Flags associated with this schema */ + int cache_size; /* Number of pages to use in the cache */ +#ifndef SQLITE_OMIT_VIRTUALTABLE + sqlite3 *db; /* "Owner" connection. See comment above */ +#endif +}; + +/* +** These macros can be used to test, set, or clear bits in the +** Db.flags field. +*/ +#define DbHasProperty(D,I,P) (((D)->aDb[I].pSchema->flags&(P))==(P)) +#define DbHasAnyProperty(D,I,P) (((D)->aDb[I].pSchema->flags&(P))!=0) +#define DbSetProperty(D,I,P) (D)->aDb[I].pSchema->flags|=(P) +#define DbClearProperty(D,I,P) (D)->aDb[I].pSchema->flags&=~(P) + +/* +** Allowed values for the DB.flags field. +** +** The DB_SchemaLoaded flag is set after the database schema has been +** read into internal hash tables. 
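+** (For example, DbHasProperty(db, 1, DB_SchemaLoaded) tests whether the
+** schema of the TEMP database in aDb[1] has already been read in.)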
+** +** DB_UnresetViews means that one or more views have column names that +** have been filled out. If the schema changes, these column names might +** changes and so the view will need to be reset. +*/ +#define DB_SchemaLoaded 0x0001 /* The schema has been loaded */ +#define DB_UnresetViews 0x0002 /* Some views have defined column names */ +#define DB_Empty 0x0004 /* The file is empty (length 0 bytes) */ + + +/* +** Each database is an instance of the following structure. +** +** The sqlite.lastRowid records the last insert rowid generated by an +** insert statement. Inserts on views do not affect its value. Each +** trigger has its own context, so that lastRowid can be updated inside +** triggers as usual. The previous value will be restored once the trigger +** exits. Upon entering a before or instead of trigger, lastRowid is no +** longer (since after version 2.8.12) reset to -1. +** +** The sqlite.nChange does not count changes within triggers and keeps no +** context. It is reset at start of sqlite3_exec. +** The sqlite.lsChange represents the number of changes made by the last +** insert, update, or delete statement. It remains constant throughout the +** length of a statement and is then updated by OP_SetCounts. It keeps a +** context stack just like lastRowid so that the count of changes +** within a trigger is not seen outside the trigger. Changes to views do not +** affect the value of lsChange. +** The sqlite.csChange keeps track of the number of current changes (since +** the last statement) and is used to update sqlite_lsChange. +** +** The member variables sqlite.errCode, sqlite.zErrMsg and sqlite.zErrMsg16 +** store the most recent error code and, if applicable, string. The +** internal function sqlite3Error() is used to set these variables +** consistently. +*/ +struct sqlite3 { + sqlite3_vfs *pVfs; /* OS Interface */ + int nDb; /* Number of backends currently in use */ + Db *aDb; /* All backends */ + int flags; /* Miscellanous flags. See below */ + int openFlags; /* Flags passed to sqlite3_vfs.xOpen() */ + int errCode; /* Most recent error code (SQLITE_*) */ + int errMask; /* & result codes with this before returning */ + u8 autoCommit; /* The auto-commit flag. 
*/ + u8 temp_store; /* 1: file 2: memory 0: default */ + u8 mallocFailed; /* True if we have seen a malloc failure */ + int nTable; /* Number of tables in the database */ + CollSeq *pDfltColl; /* The default collating sequence (BINARY) */ + i64 lastRowid; /* ROWID of most recent insert (see above) */ + i64 priorNewRowid; /* Last randomly generated ROWID */ + int magic; /* Magic number for detect library misuse */ + int nChange; /* Value returned by sqlite3_changes() */ + int nTotalChange; /* Value returned by sqlite3_total_changes() */ + sqlite3_mutex *mutex; /* Connection mutex */ + struct sqlite3InitInfo { /* Information used during initialization */ + int iDb; /* When back is being initialized */ + int newTnum; /* Rootpage of table being initialized */ + u8 busy; /* TRUE if currently initializing */ + } init; + int nExtension; /* Number of loaded extensions */ + void **aExtension; /* Array of shared libraray handles */ + struct Vdbe *pVdbe; /* List of active virtual machines */ + int activeVdbeCnt; /* Number of vdbes currently executing */ + void (*xTrace)(void*,const char*); /* Trace function */ + void *pTraceArg; /* Argument to the trace function */ + void (*xProfile)(void*,const char*,u64); /* Profiling function */ + void *pProfileArg; /* Argument to profile function */ + void *pCommitArg; /* Argument to xCommitCallback() */ + int (*xCommitCallback)(void*); /* Invoked at every commit. */ + void *pRollbackArg; /* Argument to xRollbackCallback() */ + void (*xRollbackCallback)(void*); /* Invoked at every commit. */ + void *pUpdateArg; + void (*xUpdateCallback)(void*,int, const char*,const char*,sqlite_int64); + void(*xCollNeeded)(void*,sqlite3*,int eTextRep,const char*); + void(*xCollNeeded16)(void*,sqlite3*,int eTextRep,const void*); + void *pCollNeededArg; + sqlite3_value *pErr; /* Most recent error message */ + char *zErrMsg; /* Most recent error message (UTF-8 encoded) */ + char *zErrMsg16; /* Most recent error message (UTF-16 encoded) */ + union { + int isInterrupted; /* True if sqlite3_interrupt has been called */ + double notUsed1; /* Spacer */ + } u1; +#ifndef SQLITE_OMIT_AUTHORIZATION + int (*xAuth)(void*,int,const char*,const char*,const char*,const char*); + /* Access authorization function */ + void *pAuthArg; /* 1st argument to the access auth function */ +#endif +#ifndef SQLITE_OMIT_PROGRESS_CALLBACK + int (*xProgress)(void *); /* The progress callback */ + void *pProgressArg; /* Argument to the progress callback */ + int nProgressOps; /* Number of opcodes for progress callback */ +#endif +#ifndef SQLITE_OMIT_VIRTUALTABLE + Hash aModule; /* populated by sqlite3_create_module() */ + Table *pVTab; /* vtab with active Connect/Create method */ + sqlite3_vtab **aVTrans; /* Virtual tables with open transactions */ + int nVTrans; /* Allocated size of aVTrans */ +#endif + Hash aFunc; /* All functions that can be in SQL exprs */ + Hash aCollSeq; /* All collating sequences */ + BusyHandler busyHandler; /* Busy callback */ + int busyTimeout; /* Busy handler timeout, in msec */ + Db aDbStatic[2]; /* Static space for the 2 default backends */ +#ifdef SQLITE_SSE + sqlite3_stmt *pFetch; /* Used by SSE to fetch stored statements */ +#endif + u8 dfltLockMode; /* Default locking-mode for attached dbs */ +}; + +/* +** A macro to discover the encoding of a database. +*/ +#define ENC(db) ((db)->aDb[0].pSchema->enc) + +/* +** Possible values for the sqlite.flags and or Db.flags fields. +** +** On sqlite.flags, the SQLITE_InTrans value means that we have +** executed a BEGIN. 
On Db.flags, SQLITE_InTrans means a statement +** transaction is active on that particular database file. +*/ +#define SQLITE_VdbeTrace 0x00000001 /* True to trace VDBE execution */ +#define SQLITE_InTrans 0x00000008 /* True if in a transaction */ +#define SQLITE_InternChanges 0x00000010 /* Uncommitted Hash table changes */ +#define SQLITE_FullColNames 0x00000020 /* Show full column names on SELECT */ +#define SQLITE_ShortColNames 0x00000040 /* Show short columns names */ +#define SQLITE_CountRows 0x00000080 /* Count rows changed by INSERT, */ + /* DELETE, or UPDATE and return */ + /* the count using a callback. */ +#define SQLITE_NullCallback 0x00000100 /* Invoke the callback once if the */ + /* result set is empty */ +#define SQLITE_SqlTrace 0x00000200 /* Debug print SQL as it executes */ +#define SQLITE_VdbeListing 0x00000400 /* Debug listings of VDBE programs */ +#define SQLITE_WriteSchema 0x00000800 /* OK to update SQLITE_MASTER */ +#define SQLITE_NoReadlock 0x00001000 /* Readlocks are omitted when + ** accessing read-only databases */ +#define SQLITE_IgnoreChecks 0x00002000 /* Do not enforce check constraints */ +#define SQLITE_ReadUncommitted 0x00004000 /* For shared-cache mode */ +#define SQLITE_LegacyFileFmt 0x00008000 /* Create new databases in format 1 */ +#define SQLITE_FullFSync 0x00010000 /* Use full fsync on the backend */ +#define SQLITE_LoadExtension 0x00020000 /* Enable load_extension */ + +#define SQLITE_RecoveryMode 0x00040000 /* Ignore schema errors */ +#define SQLITE_SharedCache 0x00080000 /* Cache sharing is enabled */ +#define SQLITE_Vtab 0x00100000 /* There exists a virtual table */ + +/* +** Possible values for the sqlite.magic field. +** The numbers are obtained at random and have no special meaning, other +** than being distinct from one another. +*/ +#define SQLITE_MAGIC_OPEN 0xa029a697 /* Database is open */ +#define SQLITE_MAGIC_CLOSED 0x9f3c2d33 /* Database is closed */ +#define SQLITE_MAGIC_BUSY 0xf03b7906 /* Database currently in use */ +#define SQLITE_MAGIC_ERROR 0xb5357930 /* An SQLITE_MISUSE error occurred */ + +/* +** Each SQL function is defined by an instance of the following +** structure. A pointer to this structure is stored in the sqlite.aFunc +** hash table. When multiple functions have the same name, the hash table +** points to a linked list of these structures. +*/ +struct FuncDef { + i16 nArg; /* Number of arguments. -1 means unlimited */ + u8 iPrefEnc; /* Preferred text encoding (SQLITE_UTF8, 16LE, 16BE) */ + u8 needCollSeq; /* True if sqlite3GetFuncCollSeq() might be called */ + u8 flags; /* Some combination of SQLITE_FUNC_* */ + void *pUserData; /* User data parameter */ + FuncDef *pNext; /* Next function with same name */ + void (*xFunc)(sqlite3_context*,int,sqlite3_value**); /* Regular function */ + void (*xStep)(sqlite3_context*,int,sqlite3_value**); /* Aggregate step */ + void (*xFinalize)(sqlite3_context*); /* Aggregate finializer */ + char zName[1]; /* SQL name of the function. MUST BE LAST */ +}; + +/* +** Each SQLite module (virtual table definition) is defined by an +** instance of the following structure, stored in the sqlite3.aModule +** hash table. 
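+**
+** As a hedged illustration (the names here are hypothetical), a call such as
+**
+**     sqlite3_create_module_v2(db, "demo", &demoModule, pClientData, xDestroy);
+**
+** adds one entry to sqlite3.aModule with zName=="demo", pModule pointing to
+** demoModule, pAux set to pClientData, and xDestroy recorded so it can be
+** invoked when the module is discarded.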
+*/ +struct Module { + const sqlite3_module *pModule; /* Callback pointers */ + const char *zName; /* Name passed to create_module() */ + void *pAux; /* pAux passed to create_module() */ + void (*xDestroy)(void *); /* Module destructor function */ +}; + +/* +** Possible values for FuncDef.flags +*/ +#define SQLITE_FUNC_LIKE 0x01 /* Candidate for the LIKE optimization */ +#define SQLITE_FUNC_CASE 0x02 /* Case-sensitive LIKE-type function */ +#define SQLITE_FUNC_EPHEM 0x04 /* Ephermeral. Delete with VDBE */ + +/* +** information about each column of an SQL table is held in an instance +** of this structure. +*/ +struct Column { + char *zName; /* Name of this column */ + Expr *pDflt; /* Default value of this column */ + char *zType; /* Data type for this column */ + char *zColl; /* Collating sequence. If NULL, use the default */ + u8 notNull; /* True if there is a NOT NULL constraint */ + u8 isPrimKey; /* True if this column is part of the PRIMARY KEY */ + char affinity; /* One of the SQLITE_AFF_... values */ +#ifndef SQLITE_OMIT_VIRTUALTABLE + u8 isHidden; /* True if this column is 'hidden' */ +#endif +}; + +/* +** A "Collating Sequence" is defined by an instance of the following +** structure. Conceptually, a collating sequence consists of a name and +** a comparison routine that defines the order of that sequence. +** +** There may two seperate implementations of the collation function, one +** that processes text in UTF-8 encoding (CollSeq.xCmp) and another that +** processes text encoded in UTF-16 (CollSeq.xCmp16), using the machine +** native byte order. When a collation sequence is invoked, SQLite selects +** the version that will require the least expensive encoding +** translations, if any. +** +** The CollSeq.pUser member variable is an extra parameter that passed in +** as the first argument to the UTF-8 comparison function, xCmp. +** CollSeq.pUser16 is the equivalent for the UTF-16 comparison function, +** xCmp16. +** +** If both CollSeq.xCmp and CollSeq.xCmp16 are NULL, it means that the +** collating sequence is undefined. Indices built on an undefined +** collating sequence may not be read or written. +*/ +struct CollSeq { + char *zName; /* Name of the collating sequence, UTF-8 encoded */ + u8 enc; /* Text encoding handled by xCmp() */ + u8 type; /* One of the SQLITE_COLL_... values below */ + void *pUser; /* First argument to xCmp() */ + int (*xCmp)(void*,int, const void*, int, const void*); + void (*xDel)(void*); /* Destructor for pUser */ +}; + +/* +** Allowed values of CollSeq flags: +*/ +#define SQLITE_COLL_BINARY 1 /* The default memcmp() collating sequence */ +#define SQLITE_COLL_NOCASE 2 /* The built-in NOCASE collating sequence */ +#define SQLITE_COLL_REVERSE 3 /* The built-in REVERSE collating sequence */ +#define SQLITE_COLL_USER 0 /* Any other user-defined collating sequence */ + +/* +** A sort order can be either ASC or DESC. +*/ +#define SQLITE_SO_ASC 0 /* Sort in ascending order */ +#define SQLITE_SO_DESC 1 /* Sort in ascending order */ + +/* +** Column affinity types. +** +** These used to have mnemonic name like 'i' for SQLITE_AFF_INTEGER and +** 't' for SQLITE_AFF_TEXT. But we can save a little space and improve +** the speed a little by number the values consecutively. +** +** But rather than start with 0 or 1, we begin with 'a'. That way, +** when multiple affinity types are concatenated into a string and +** used as the P3 operand, they will be more readable. 
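+** (For instance, a table declared roughly as (x TEXT, y INTEGER, z REAL)
+** has the affinity string "ade" under the values defined below.)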
+** +** Note also that the numeric types are grouped together so that testing +** for a numeric type is a single comparison. +*/ +#define SQLITE_AFF_TEXT 'a' +#define SQLITE_AFF_NONE 'b' +#define SQLITE_AFF_NUMERIC 'c' +#define SQLITE_AFF_INTEGER 'd' +#define SQLITE_AFF_REAL 'e' + +#define sqlite3IsNumericAffinity(X) ((X)>=SQLITE_AFF_NUMERIC) + +/* +** Each SQL table is represented in memory by an instance of the +** following structure. +** +** Table.zName is the name of the table. The case of the original +** CREATE TABLE statement is stored, but case is not significant for +** comparisons. +** +** Table.nCol is the number of columns in this table. Table.aCol is a +** pointer to an array of Column structures, one for each column. +** +** If the table has an INTEGER PRIMARY KEY, then Table.iPKey is the index of +** the column that is that key. Otherwise Table.iPKey is negative. Note +** that the datatype of the PRIMARY KEY must be INTEGER for this field to +** be set. An INTEGER PRIMARY KEY is used as the rowid for each row of +** the table. If a table has no INTEGER PRIMARY KEY, then a random rowid +** is generated for each row of the table. Table.hasPrimKey is true if +** the table has any PRIMARY KEY, INTEGER or otherwise. +** +** Table.tnum is the page number for the root BTree page of the table in the +** database file. If Table.iDb is the index of the database table backend +** in sqlite.aDb[]. 0 is for the main database and 1 is for the file that +** holds temporary tables and indices. If Table.isEphem +** is true, then the table is stored in a file that is automatically deleted +** when the VDBE cursor to the table is closed. In this case Table.tnum +** refers VDBE cursor number that holds the table open, not to the root +** page number. Transient tables are used to hold the results of a +** sub-query that appears instead of a real table name in the FROM clause +** of a SELECT statement. +*/ +struct Table { + char *zName; /* Name of the table */ + int nCol; /* Number of columns in this table */ + Column *aCol; /* Information about each column */ + int iPKey; /* If not less then 0, use aCol[iPKey] as the primary key */ + Index *pIndex; /* List of SQL indexes on this table. */ + int tnum; /* Root BTree node for this table (see note above) */ + Select *pSelect; /* NULL for tables. Points to definition if a view. */ + int nRef; /* Number of pointers to this Table */ + Trigger *pTrigger; /* List of SQL triggers on this table */ + FKey *pFKey; /* Linked list of all foreign keys in this table */ + char *zColAff; /* String defining the affinity of each column */ +#ifndef SQLITE_OMIT_CHECK + Expr *pCheck; /* The AND of all CHECK constraints */ +#endif +#ifndef SQLITE_OMIT_ALTERTABLE + int addColOffset; /* Offset in CREATE TABLE statement to add a new column */ +#endif + u8 readOnly; /* True if this table should not be written by the user */ + u8 isEphem; /* True if created using OP_OpenEphermeral */ + u8 hasPrimKey; /* True if there exists a primary key */ + u8 keyConf; /* What to do in case of uniqueness conflict on iPKey */ + u8 autoInc; /* True if the integer primary key is autoincrement */ +#ifndef SQLITE_OMIT_VIRTUALTABLE + u8 isVirtual; /* True if this is a virtual table */ + u8 isCommit; /* True once the CREATE TABLE has been committed */ + Module *pMod; /* Pointer to the implementation of the module */ + sqlite3_vtab *pVtab; /* Pointer to the module instance */ + int nModuleArg; /* Number of arguments to the module */ + char **azModuleArg; /* Text of all module args. 
[0] is module name */ +#endif + Schema *pSchema; /* Schema that contains this table */ +}; + +/* +** Test to see whether or not a table is a virtual table. This is +** done as a macro so that it will be optimized out when virtual +** table support is omitted from the build. +*/ +#ifndef SQLITE_OMIT_VIRTUALTABLE +# define IsVirtual(X) ((X)->isVirtual) +# define IsHiddenColumn(X) ((X)->isHidden) +#else +# define IsVirtual(X) 0 +# define IsHiddenColumn(X) 0 +#endif + +/* +** Each foreign key constraint is an instance of the following structure. +** +** A foreign key is associated with two tables. The "from" table is +** the table that contains the REFERENCES clause that creates the foreign +** key. The "to" table is the table that is named in the REFERENCES clause. +** Consider this example: +** +** CREATE TABLE ex1( +** a INTEGER PRIMARY KEY, +** b INTEGER CONSTRAINT fk1 REFERENCES ex2(x) +** ); +** +** For foreign key "fk1", the from-table is "ex1" and the to-table is "ex2". +** +** Each REFERENCES clause generates an instance of the following structure +** which is attached to the from-table. The to-table need not exist when +** the from-table is created. The existance of the to-table is not checked +** until an attempt is made to insert data into the from-table. +** +** The sqlite.aFKey hash table stores pointers to this structure +** given the name of a to-table. For each to-table, all foreign keys +** associated with that table are on a linked list using the FKey.pNextTo +** field. +*/ +struct FKey { + Table *pFrom; /* The table that constains the REFERENCES clause */ + FKey *pNextFrom; /* Next foreign key in pFrom */ + char *zTo; /* Name of table that the key points to */ + FKey *pNextTo; /* Next foreign key that points to zTo */ + int nCol; /* Number of columns in this key */ + struct sColMap { /* Mapping of columns in pFrom to columns in zTo */ + int iFrom; /* Index of column in pFrom */ + char *zCol; /* Name of column in zTo. If 0 use PRIMARY KEY */ + } *aCol; /* One entry for each of nCol column s */ + u8 isDeferred; /* True if constraint checking is deferred till COMMIT */ + u8 updateConf; /* How to resolve conflicts that occur on UPDATE */ + u8 deleteConf; /* How to resolve conflicts that occur on DELETE */ + u8 insertConf; /* How to resolve conflicts that occur on INSERT */ +}; + +/* +** SQLite supports many different ways to resolve a constraint +** error. ROLLBACK processing means that a constraint violation +** causes the operation in process to fail and for the current transaction +** to be rolled back. ABORT processing means the operation in process +** fails and any prior changes from that one operation are backed out, +** but the transaction is not rolled back. FAIL processing means that +** the operation in progress stops and returns an error code. But prior +** changes due to the same operation are not backed out and no rollback +** occurs. IGNORE means that the particular row that caused the constraint +** error is not inserted or updated. Processing continues and no error +** is returned. REPLACE means that preexisting database rows that caused +** a UNIQUE constraint violation are removed so that the new insert or +** update can proceed. Processing continues and no error is reported. +** +** RESTRICT, SETNULL, and CASCADE actions apply only to foreign keys. +** RESTRICT is the same as ABORT for IMMEDIATE foreign keys and the +** same as ROLLBACK for DEFERRED keys. SETNULL means that the foreign +** key is set to NULL. 
CASCADE means that a DELETE or UPDATE of the +** referenced table row is propagated into the row that holds the +** foreign key. +** +** The following symbolic values are used to record which type +** of action to take. +*/ +#define OE_None 0 /* There is no constraint to check */ +#define OE_Rollback 1 /* Fail the operation and rollback the transaction */ +#define OE_Abort 2 /* Back out changes but do no rollback transaction */ +#define OE_Fail 3 /* Stop the operation but leave all prior changes */ +#define OE_Ignore 4 /* Ignore the error. Do not do the INSERT or UPDATE */ +#define OE_Replace 5 /* Delete existing record, then do INSERT or UPDATE */ + +#define OE_Restrict 6 /* OE_Abort for IMMEDIATE, OE_Rollback for DEFERRED */ +#define OE_SetNull 7 /* Set the foreign key value to NULL */ +#define OE_SetDflt 8 /* Set the foreign key value to its default */ +#define OE_Cascade 9 /* Cascade the changes */ + +#define OE_Default 99 /* Do whatever the default action is */ + + +/* +** An instance of the following structure is passed as the first +** argument to sqlite3VdbeKeyCompare and is used to control the +** comparison of the two index keys. +** +** If the KeyInfo.incrKey value is true and the comparison would +** otherwise be equal, then return a result as if the second key +** were larger. +*/ +struct KeyInfo { + sqlite3 *db; /* The database connection */ + u8 enc; /* Text encoding - one of the TEXT_Utf* values */ + u8 incrKey; /* Increase 2nd key by epsilon before comparison */ + int nField; /* Number of entries in aColl[] */ + u8 *aSortOrder; /* If defined an aSortOrder[i] is true, sort DESC */ + CollSeq *aColl[1]; /* Collating sequence for each term of the key */ +}; + +/* +** Each SQL index is represented in memory by an +** instance of the following structure. +** +** The columns of the table that are to be indexed are described +** by the aiColumn[] field of this structure. For example, suppose +** we have the following table and index: +** +** CREATE TABLE Ex1(c1 int, c2 int, c3 text); +** CREATE INDEX Ex2 ON Ex1(c3,c1); +** +** In the Table structure describing Ex1, nCol==3 because there are +** three columns in the table. In the Index structure describing +** Ex2, nColumn==2 since 2 of the 3 columns of Ex1 are indexed. +** The value of aiColumn is {2, 0}. aiColumn[0]==2 because the +** first column to be indexed (c3) has an index of 2 in Ex1.aCol[]. +** The second column to be indexed (c1) has an index of 0 in +** Ex1.aCol[], hence Ex2.aiColumn[1]==0. +** +** The Index.onError field determines whether or not the indexed columns +** must be unique and what to do if they are not. When Index.onError=OE_None, +** it means this is not a unique index. Otherwise it is a unique index +** and the value of Index.onError indicate the which conflict resolution +** algorithm to employ whenever an attempt is made to insert a non-unique +** element. +*/ +struct Index { + char *zName; /* Name of this index */ + int nColumn; /* Number of columns in the table used by this index */ + int *aiColumn; /* Which columns are used by this index. 1st is 0 */ + unsigned *aiRowEst; /* Result of ANALYZE: Est. 
rows selected by each column */ + Table *pTable; /* The SQL table being indexed */ + int tnum; /* Page containing root of this index in database file */ + u8 onError; /* OE_Abort, OE_Ignore, OE_Replace, or OE_None */ + u8 autoIndex; /* True if is automatically created (ex: by UNIQUE) */ + char *zColAff; /* String defining the affinity of each column */ + Index *pNext; /* The next index associated with the same table */ + Schema *pSchema; /* Schema containing this index */ + u8 *aSortOrder; /* Array of size Index.nColumn. True==DESC, False==ASC */ + char **azColl; /* Array of collation sequence names for index */ +}; + +/* +** Each token coming out of the lexer is an instance of +** this structure. Tokens are also used as part of an expression. +** +** Note if Token.z==0 then Token.dyn and Token.n are undefined and +** may contain random values. Do not make any assuptions about Token.dyn +** and Token.n when Token.z==0. +*/ +struct Token { + const unsigned char *z; /* Text of the token. Not NULL-terminated! */ + unsigned dyn : 1; /* True for malloced memory, false for static */ + unsigned n : 31; /* Number of characters in this token */ +}; + +/* +** An instance of this structure contains information needed to generate +** code for a SELECT that contains aggregate functions. +** +** If Expr.op==TK_AGG_COLUMN or TK_AGG_FUNCTION then Expr.pAggInfo is a +** pointer to this structure. The Expr.iColumn field is the index in +** AggInfo.aCol[] or AggInfo.aFunc[] of information needed to generate +** code for that node. +** +** AggInfo.pGroupBy and AggInfo.aFunc.pExpr point to fields within the +** original Select structure that describes the SELECT statement. These +** fields do not need to be freed when deallocating the AggInfo structure. +*/ +struct AggInfo { + u8 directMode; /* Direct rendering mode means take data directly + ** from source tables rather than from accumulators */ + u8 useSortingIdx; /* In direct mode, reference the sorting index rather + ** than the source table */ + int sortingIdx; /* Cursor number of the sorting index */ + ExprList *pGroupBy; /* The group by clause */ + int nSortingColumn; /* Number of columns in the sorting index */ + struct AggInfo_col { /* For each column used in source tables */ + Table *pTab; /* Source table */ + int iTable; /* Cursor number of the source table */ + int iColumn; /* Column number within the source table */ + int iSorterColumn; /* Column number in the sorting index */ + int iMem; /* Memory location that acts as accumulator */ + Expr *pExpr; /* The original expression */ + } *aCol; + int nColumn; /* Number of used entries in aCol[] */ + int nColumnAlloc; /* Number of slots allocated for aCol[] */ + int nAccumulator; /* Number of columns that show through to the output. + ** Additional columns are used only as parameters to + ** aggregate functions */ + struct AggInfo_func { /* For each aggregate function */ + Expr *pExpr; /* Expression encoding the function */ + FuncDef *pFunc; /* The aggregate function implementation */ + int iMem; /* Memory location that acts as accumulator */ + int iDistinct; /* Ephermeral table used to enforce DISTINCT */ + } *aFunc; + int nFunc; /* Number of entries in aFunc[] */ + int nFuncAlloc; /* Number of slots allocated for aFunc[] */ +}; + +/* +** Each node of an expression in the parse tree is an instance +** of this structure. +** +** Expr.op is the opcode. The integer parser token codes are reused +** as opcodes here. 
For example, the parser defines TK_GE to be an integer +** code representing the ">=" operator. This same integer code is reused +** to represent the greater-than-or-equal-to operator in the expression +** tree. +** +** Expr.pRight and Expr.pLeft are subexpressions. Expr.pList is a list +** of argument if the expression is a function. +** +** Expr.token is the operator token for this node. For some expressions +** that have subexpressions, Expr.token can be the complete text that gave +** rise to the Expr. In the latter case, the token is marked as being +** a compound token. +** +** An expression of the form ID or ID.ID refers to a column in a table. +** For such expressions, Expr.op is set to TK_COLUMN and Expr.iTable is +** the integer cursor number of a VDBE cursor pointing to that table and +** Expr.iColumn is the column number for the specific column. If the +** expression is used as a result in an aggregate SELECT, then the +** value is also stored in the Expr.iAgg column in the aggregate so that +** it can be accessed after all aggregates are computed. +** +** If the expression is a function, the Expr.iTable is an integer code +** representing which function. If the expression is an unbound variable +** marker (a question mark character '?' in the original SQL) then the +** Expr.iTable holds the index number for that variable. +** +** If the expression is a subquery then Expr.iColumn holds an integer +** register number containing the result of the subquery. If the +** subquery gives a constant result, then iTable is -1. If the subquery +** gives a different answer at different times during statement processing +** then iTable is the address of a subroutine that computes the subquery. +** +** The Expr.pSelect field points to a SELECT statement. The SELECT might +** be the right operand of an IN operator. Or, if a scalar SELECT appears +** in an expression the opcode is TK_SELECT and Expr.pSelect is the only +** operand. +** +** If the Expr is of type OP_Column, and the table it is selecting from +** is a disk table or the "old.*" pseudo-table, then pTab points to the +** corresponding table definition. +*/ +struct Expr { + u8 op; /* Operation performed by this node */ + char affinity; /* The affinity of the column or 0 if not a column */ + u16 flags; /* Various flags. See below */ + CollSeq *pColl; /* The collation type of the column or 0 */ + Expr *pLeft, *pRight; /* Left and right subnodes */ + ExprList *pList; /* A list of expressions used as function arguments + ** or in " IN (aCol[] or ->aFunc[] */ + int iRightJoinTable; /* If EP_FromJoin, the right table of the join */ + Select *pSelect; /* When the expression is a sub-select. Also the + ** right side of " IN (