Index: Makefile ================================================================== --- Makefile +++ Makefile @@ -7,11 +7,11 @@ SRCDIR = ./src #### The directory into which object code files should be written. # # -OBJDIR = ./bld +OBJDIR = ./obj #### C Compiler and options for use in building executables that # will run on the platform that is doing the build. This is used # to compile code-generator programs as part of the build process. # See TCC below for the C compiler for building the finished binary. Index: src/add.c ================================================================== --- src/add.c +++ src/add.c @@ -44,11 +44,11 @@ || strcmp(zPath, "_FOSSIL_-shm")==0 || strcmp(zPath, ".fos")==0 || strcmp(zPath, ".fos-journal")==0 || strcmp(zPath, ".fos-wal")==0 || strcmp(zPath, ".fos-shm")==0 - || (pOmit && blob_compare(&pathname, pOmit)==0) + || blob_compare(&pathname, pOmit)==0 ){ fossil_warning("cannot add %s", zPath); }else{ if( !file_is_simple_pathname(zPath) ){ fossil_fatal("filename contains illegal characters: %s", zPath); @@ -271,115 +271,10 @@ free(zName); } db_multi_exec("DELETE FROM vfile WHERE deleted AND rid=0"); db_end_transaction(0); } - -/* -** COMMAND: addremove -** -** Usage: %fossil addremove ?--dotfiles? ?--ignore GLOBPATTERN? ?--test? -** -** Do all necessary "add" and "rm" commands to synchronize the repository -** with the content of the working checkout -** -** * All files in the checkout but not in the repository (that is, -** all files displayed using the "extra" command) are added as -** if by the "add" command. -** -** * All files in the repository but missing from the checkout (that is, -** all files that show as MISSING with the "status" command) are -** removed as if by the "rm" command. -** -** The command does not "commit". You must run the "commit" separately -** as a separate step. -** -** Files and directories whose names begin with "." are ignored unless -** the --dotfiles option is used. -** -** The --ignore option overrides the "ignore-glob" setting. See -** documentation on the "setting" command for further information. -** -** The --test option shows what would happen without actually doing anything. -** -** This command can be used to track third party software. 
-*/ -void import_cmd(void){ - Blob path; - const char *zIgnoreFlag = find_option("ignore",0,1); - int allFlag = find_option("dotfiles",0,0)!=0; - int isTest = find_option("test",0,0)!=0; - int n; - Stmt q; - int vid; - Blob repo; - int nAdd = 0; - int nDelete = 0; - - if( zIgnoreFlag==0 ){ - zIgnoreFlag = db_get("ignore-glob", 0); - } - db_must_be_within_tree(); - vid = db_lget_int("checkout",0); - if( vid==0 ){ - fossil_panic("no checkout to add to"); - } - db_begin_transaction(); - db_multi_exec("CREATE TEMP TABLE sfile(x TEXT PRIMARY KEY)"); - n = strlen(g.zLocalRoot); - blob_init(&path, g.zLocalRoot, n-1); - /* now we read the complete file structure into a temp table */ - vfile_scan(0, &path, blob_size(&path), allFlag); - if( file_tree_name(g.zRepositoryName, &repo, 0) ){ - db_multi_exec("DELETE FROM sfile WHERE x=%B", &repo); - } - - /* step 1: search for extra files */ - db_prepare(&q, - "SELECT x, %Q || x FROM sfile" - " WHERE x NOT IN ('manifest','manifest.uuid','_FOSSIL_'," - "'_FOSSIL_-journal','.fos','.fos-journal'," - "'_FOSSIL_-wal','_FOSSIL_-shm','.fos-wal'," - "'.fos-shm')" - " AND NOT %s" - " ORDER BY 1", - g.zLocalRoot, - glob_expr("x", zIgnoreFlag) - ); - while( db_step(&q)==SQLITE_ROW ){ - add_one_file(db_column_text(&q, 1), vid, 0); - nAdd++; - } - db_finalize(&q); - /* step 2: search for missing files */ - db_prepare(&q, - "SELECT pathname,%Q || pathname,deleted FROM vfile" - " WHERE deleted!=1" - " ORDER BY 1", - g.zLocalRoot - ); - while( db_step(&q)==SQLITE_ROW ){ - const char * zFile; - const char * zPath; - - zFile = db_column_text(&q, 0); - zPath = db_column_text(&q, 1); - if( !file_isfile(zPath) ){ - if( !isTest ){ - db_multi_exec("UPDATE vfile SET deleted=1 WHERE pathname=%Q", zFile); - } - printf("DELETED %s\n", zFile); - nDelete++; - } - } - db_finalize(&q); - /* show cmmand summary */ - printf("added %d files, deleted %d files\n", nAdd, nDelete); - - db_end_transaction(isTest); -} - /* ** Rename a single file. ** ** The original name of the file is zOrig. The new filename is zNew. Index: src/allrepo.c ================================================================== --- src/allrepo.c +++ src/allrepo.c @@ -60,13 +60,10 @@ ** On Win32 systems, the file is named "_fossil" and is located in ** %LOCALAPPDATA%, %APPDATA% or %HOMEPATH%. ** ** Available operations are: ** -** ignore Arguments are repositories that should be ignored -** by subsequent list, pull, push, rebuild, and sync. -** ** list | ls Display the location of all repositories ** ** pull Run a "pull" operation on all repositories ** ** push Run a "push" on all repositories @@ -75,12 +72,11 @@ ** ** sync Run a "sync" on all repositories ** ** Respositories are automatically added to the set of known repositories ** when one of the following commands against the repository: clone, info, -** pull, push, or sync. Even previously ignored repositories are added back -** to the list of repositories by these commands. 
+** pull, push, or sync */ void all_cmd(void){ int n; Stmt q; const char *zCmd; @@ -103,24 +99,15 @@ zCmd = "pull -autourl -R"; }else if( strncmp(zCmd, "rebuild", n)==0 ){ zCmd = "rebuild"; }else if( strncmp(zCmd, "sync", n)==0 ){ zCmd = "sync -autourl -R"; - }else if( strncmp(zCmd, "ignore", n)==0 ){ - int j; - db_begin_transaction(); - for(j=3; j=3 ) zCmd = g.argv[2]; - n = strlen(zCmd); - if( strncmp(zCmd,"new",n)==0 ){ + n = strlen(g.argv[2]); + if( n>=2 && strncmp(g.argv[2],"new",n)==0 ){ branch_new(); - }else if( strncmp(zCmd,"list",n)==0 ){ + }else if( n>=2 && strncmp(g.argv[2],"list",n)==0 ){ Stmt q; - int vid; - char *zCurrent = 0; - - if( g.localOpen ){ - vid = db_lget_int("checkout", 0); - zCurrent = db_text(0, "SELECT value FROM tagxref" - " WHERE rid=%d AND tagid=%d", vid, TAG_BRANCH); - } - compute_leaves(0, 1); db_prepare(&q, - "SELECT DISTINCT value FROM tagxref" - " WHERE tagid=%d AND value NOT NULL AND rid IN leaves" - " ORDER BY value /*sort*/", - TAG_BRANCH + "%s" + " AND blob.rid IN (SELECT rid FROM tagxref" + " WHERE tagid=%d AND tagtype==2 AND srcid!=0)" + " ORDER BY event.mtime DESC", + timeline_query_for_tty(), TAG_BRANCH ); - while( db_step(&q)==SQLITE_ROW ){ - const char *zBr = db_column_text(&q, 0); - int isCur = zCurrent!=0 && strcmp(zCurrent,zBr)==0; - printf("%s%s\n", (isCur ? "* " : " "), zBr); - } + print_timeline(&q, 2000); db_finalize(&q); }else{ fossil_panic("branch subcommand should be one of: " "new list"); } Index: src/checkin.c ================================================================== --- src/checkin.c +++ src/checkin.c @@ -130,11 +130,11 @@ printf("repository: %s\n", db_lget("repository","")); printf("local-root: %s\n", g.zLocalRoot); printf("server-code: %s\n", db_get("server-code", "")); vid = db_lget_int("checkout", 0); if( vid ){ - show_common_info(vid, "checkout:", 1, 1); + show_common_info(vid, "checkout:", 0); } changes_cmd(); } /* @@ -418,11 +418,11 @@ blob_add_cr(&text); #endif blob_write_to_file(&text, zFile); zCmd = mprintf("%s \"%s\"", zEditor, zFile); printf("%s\n", zCmd); - if( fossil_system(zCmd) ){ + if( portable_system(zCmd) ){ fossil_panic("editor aborted"); } blob_reset(&text); blob_read_from_file(&text, zFile); blob_remove_cr(&text); @@ -697,20 +697,21 @@ ** ** Usage: %fossil commit ?OPTIONS? ?FILE...? ** ** Create a new version containing all of the changes in the current ** checkout. You will be prompted to enter a check-in comment unless -** the comment has been specified on the command-line using "-m" or a -** file containing the comment using -M. The editor defined in the -** "editor" fossil option (see %fossil help set) will be used, or from -** the "VISUAL" or "EDITOR" environment variables (in that order) if -** no editor is set. -** -** All files that have changed will be committed unless some subset of -** files is specified on the command line. -** -** The --branch option followed by a branch name causes the new check-in +** the comment has been specified on the command-line using "-m". +** The editor defined in the "editor" fossil option (see %fossil help set) +** will be used, or from the "VISUAL" or "EDITOR" environment variables +** (in that order) if no editor is set. +** +** You will be prompted for your GPG passphrase in order to sign the +** new manifest unless the "--nosign" option is used. All files that +** have changed will be committed unless some subset of files is +** specified on the command line. 
+** +** The --branch option followed by a branch name cases the new check-in ** to be placed in the named branch. The --bgcolor option can be followed ** by a color name (ex: '#ffc0c0') to specify the background color of ** entries in the new branch when shown in the web timeline interface. ** ** A check-in is not permitted to fork unless the --force or -f @@ -720,11 +721,10 @@ ** Children of private check-ins are automatically private. ** ** Options: ** ** --comment|-m COMMENT-TEXT -** --message-file|-M COMMENT-FILE ** --branch NEW-BRANCH-NAME ** --bgcolor COLOR ** --nosign ** --force|-f ** --private @@ -807,18 +807,11 @@ /* ** Autosync if autosync is enabled and this is not a private check-in. */ if( !g.markPrivate ){ - if( autosync(AUTOSYNC_PULL) ){ - Blob ans; - blob_zero(&ans); - prompt_user("continue in spite of sync failure (y/N)? ", &ans); - if( blob_str(&ans)[0]!='y' ){ - fossil_exit(1); - } - } + autosync(AUTOSYNC_PULL); } /* Require confirmation to continue with the check-in if there is ** clock skew */ @@ -1066,11 +1059,10 @@ ** calculated before the checkin started (and stored as the R record ** of the manifest file). */ vfile_aggregate_checksum_repository(nvid, &cksum2); if( blob_compare(&cksum1, &cksum2) ){ - vfile_compare_repository_to_disk(nvid); fossil_panic("tree checksum does not match repository after commit"); } /* Verify that the manifest checksum matches the expected checksum */ vfile_aggregate_checksum_manifest(nvid, &cksum2, &cksum1b); Index: src/clearsign.c ================================================================== --- src/clearsign.c +++ src/clearsign.c @@ -39,11 +39,11 @@ zRand = db_text(0, "SELECT hex(randomblob(10))"); zOut = mprintf("out-%s", zRand); zIn = mprintf("in-%z", zRand); blob_write_to_file(pIn, zOut); zCmd = mprintf("%s %s %s", zBase, zIn, zOut); - rc = fossil_system(zCmd); + rc = portable_system(zCmd); free(zCmd); if( rc==0 ){ if( pOut==pIn ){ blob_reset(pIn); } Index: src/configure.c ================================================================== --- src/configure.c +++ src/configure.c @@ -495,11 +495,11 @@ } } db_end_transaction(0); printf("Configuration reset to factory defaults.\n"); printf("To recover, use: %s %s import %s\n", - fossil_nameofexe(), g.argv[1], zBackup); + g.argv[0], g.argv[1], zBackup); }else { fossil_fatal("METHOD should be one of:" " export import merge pull push reset"); } Index: src/db.c ================================================================== --- src/db.c +++ src/db.c @@ -73,11 +73,11 @@ g.cgiOutput = 0; cgi_printf("

<h1>Database Error</h1>\n" "<pre>%h</pre><p>%s</p>
", z, zRebuildMsg); cgi_reply(); }else{ - fprintf(stderr, "%s: %s\n\n%s", fossil_nameofexe(), z, zRebuildMsg); + fprintf(stderr, "%s: %s\n\n%s", g.argv[0], z, zRebuildMsg); } db_force_rollback(); fossil_exit(1); } @@ -687,11 +687,10 @@ g.dbConfig = 0; }else{ g.dbConfig = openDatabase(zDbName); } g.configOpen = 1; - free(zDbName); } /* ** If zDbName is a valid local database file, open it and return ** true. If it is not a valid local database file, return 0. @@ -1032,14 +1031,12 @@ } } /* ** COMMAND: new -** COMMAND: init ** ** Usage: %fossil new ?OPTIONS? FILENAME -** Or: %fossil init ?OPTIONS? FILENAME ** ** Create a repository for a new project in the file named FILENAME. ** This command is distinct from "clone". The "clone" command makes ** a copy of an existing project. This command starts a new project. ** @@ -1526,11 +1523,11 @@ { "gdiff-command", 0, 16, "gdiff" }, { "ignore-glob", 0, 40, "" }, { "http-port", 0, 16, "8080" }, { "localauth", 0, 0, "off" }, { "manifest", 0, 0, "off" }, - { "mtime-changes", 0, 0, "on" }, + { "mtime-changes", 0, 0, "off" }, { "pgp-command", 0, 32, "gpg --clearsign -o " }, { "proxy", 0, 32, "off" }, { "repo-cksum", 0, 0, "on" }, { "ssh-command", 0, 32, "" }, { "web-browser", 0, 32, "" }, Index: src/deltacmd.c ================================================================== --- src/deltacmd.c +++ src/deltacmd.c @@ -49,11 +49,12 @@ ** the first file into the second. */ void delta_create_cmd(void){ Blob orig, target, delta; if( g.argc!=5 ){ - usage("ORIGIN TARGET DELTA"); + fprintf(stderr,"Usage: %s %s ORIGIN TARGET DELTA\n", g.argv[0], g.argv[1]); + fossil_exit(1); } if( blob_read_from_file(&orig, g.argv[2])<0 ){ fprintf(stderr,"cannot read %s\n", g.argv[2]); fossil_exit(1); } @@ -111,11 +112,12 @@ ** and write the result. */ void delta_apply_cmd(void){ Blob orig, target, delta; if( g.argc!=5 ){ - usage("ORIGIN DELTA TARGET"); + fprintf(stderr,"Usage: %s %s ORIGIN DELTA TARGET\n", g.argv[0], g.argv[1]); + fossil_exit(1); } if( blob_read_from_file(&orig, g.argv[2])<0 ){ fprintf(stderr,"cannot read %s\n", g.argv[2]); fossil_exit(1); } Index: src/diffcmd.c ================================================================== --- src/diffcmd.c +++ src/diffcmd.c @@ -24,10 +24,30 @@ /* ** Diff option flags */ #define DIFF_NEWFILE 0x01 /* Treat non-existing fails as empty files */ #define DIFF_NOEOLWS 0x02 /* Ignore whitespace at the end of lines */ + +/* +** This function implements a cross-platform "system()" interface. +*/ +int portable_system(const char *zOrigCmd){ + int rc; +#if defined(_WIN32) + /* On windows, we have to put double-quotes around the entire command. + ** Who knows why - this is just the way windows works. + */ + char *zNewCmd = mprintf("\"%s\"", zOrigCmd); + rc = system(zNewCmd); + free(zNewCmd); +#else + /* On unix, evaluate the command directly. + */ + rc = system(zOrigCmd); +#endif + return rc; +} /* ** Show the difference between two files, one in memory and one on disk. 
** ** The difference is the set of edits needed to transform pFile1 into @@ -86,11 +106,11 @@ shell_escape(&cmd, blob_str(&nameFile1)); blob_append(&cmd, " ", 1); shell_escape(&cmd, zFile2); /* Run the external diff command */ - fossil_system(blob_str(&cmd)); + portable_system(blob_str(&cmd)); /* Delete the temporary file and clean up memory used */ unlink(blob_str(&nameFile1)); blob_reset(&nameFile1); blob_reset(&cmd); @@ -140,11 +160,11 @@ shell_escape(&cmd, zTemp1); blob_append(&cmd, " ", 1); shell_escape(&cmd, zTemp2); /* Run the external diff command */ - fossil_system(blob_str(&cmd)); + portable_system(blob_str(&cmd)); /* Delete the temporary file and clean up memory used */ unlink(zTemp1); unlink(zTemp2); blob_reset(&cmd); @@ -407,13 +427,10 @@ ** ** The "-i" command-line option forces the use of the internal diff logic ** rather than any external diff program that might be configured using ** the "setting" command. If no external diff program is configured, then ** the "-i" option is a no-op. The "-i" option converts "gdiff" into "diff". -** -** The "-N" or "--new-file" option causes the complete text of added or -** deleted files to be displayed. */ void diff_cmd(void){ int isGDiff; /* True for gdiff. False for normal diff */ int isInternDiff; /* True for internal diff */ int hasNFlag; /* True if -N or --new-file flag is used */ Index: src/doc.c ================================================================== --- src/doc.c +++ src/doc.c @@ -240,11 +240,10 @@ { "step", 4, "application/STEP" }, { "stl", 3, "application/SLA" }, { "stp", 3, "application/STEP" }, { "sv4cpio", 7, "application/x-sv4cpio" }, { "sv4crc", 6, "application/x-sv4crc" }, - { "svg", 3, "image/svg+xml" }, { "swf", 3, "application/x-shockwave-flash" }, { "t", 1, "application/x-troff" }, { "tar", 3, "application/x-tar" }, { "tcl", 3, "application/x-tcl" }, { "tex", 3, "application/x-tex" }, DELETED src/export.c Index: src/export.c ================================================================== --- src/export.c +++ /dev/null @@ -1,203 +0,0 @@ -/* -** Copyright (c) 2010 D. Richard Hipp -** -** This program is free software; you can redistribute it and/or -** modify it under the terms of the Simplified BSD License (also -** known as the "2-Clause License" or "FreeBSD License".) - -** This program is distributed in the hope that it will be useful, -** but without any warranty; without even the implied warranty of -** merchantability or fitness for a particular purpose. -** -** Author contact information: -** drh@sqlite.org -** -******************************************************************************* -** -** This file contains code used to export the content of a Fossil -** repository in the git-fast-import format. -*/ -#include "config.h" -#include "export.h" -#include - -/* -** Output a "committer" record for the given user. 
-*/ -static void print_person(const char *zUser){ - static Stmt q; - const char *zContact; - char *zName; - char *zEmail; - int i, j; - - if( zUser==0 ){ - printf(" "); - return; - } - db_static_prepare(&q, "SELECT info FROM user WHERE login=:user"); - db_bind_text(&q, ":user", zUser); - if( db_step(&q)!=SQLITE_ROW ){ - db_reset(&q); - for(i=0; zUser[i] && zUser[i]!='>' && zUser[i]!='<'; i++){} - if( zUser[i]==0 ){ - printf(" <%s>", zUser); - return; - } - zName = mprintf("%s", zUser); - for(i=j=0; zName[i]; i++){ - if( zName[i]!='<' && zName[i]!='>' ){ - zName[j++] = zName[i]; - } - } - zName[j] = 0; - printf(" %s <%s>", zName, zUser); - free(zName); - return; - } - zContact = db_column_text(&q, 0); - for(i=0; zContact[i] && zContact[i]!='>' && zContact[i]!='<'; i++){} - if( zContact[i]==0 ){ - printf(" %s <%s>", zContact, zUser); - db_reset(&q); - return; - } - if( zContact[i]=='<' ){ - zEmail = mprintf("%s", &zContact[i]); - for(i=0; zEmail[i] && zEmail[i]!='>'; i++){} - if( zEmail[i]=='>' ) zEmail[i+1] = 0; - }else{ - zEmail = mprintf("<%s>", zUser); - } - zName = mprintf("%.*s", i, zContact); - for(i=j=0; zName[i]; i++){ - if( zName[i]!='"' ) zName[j++] = zName[i]; - } - zName[j] = 0; - printf(" %s %s", zName, zEmail); - free(zName); - free(zEmail); - db_reset(&q); -} - - -/* -** COMMAND: export -** -** Usage: %fossil export -** -** Write an export of all check-ins to standard output. The export is -** written in the Git "fast-import" format. -** -** Run this command within a checkout. Or use the -R or --repository -** option to specify a Fossil repository to be exported. -** -** Only check-ins are exported. Git does not support tickets or wiki -** or events or attachments, so none of that is exported. -*/ -void export_cmd(void){ - Stmt q; - int i; - Bag blobs, vers; - bag_init(&blobs); - bag_init(&vers); - - db_find_and_open_repository(1); - - /* Step 1: Generate "blob" records for every artifact that is part - ** of a check-in - */ - fossil_binary_mode(stdout); - db_prepare(&q, "SELECT DISTINCT fid FROM mlink WHERE fid>0"); - while( db_step(&q)==SQLITE_ROW ){ - int rid = db_column_int(&q, 0); - Blob content; - content_get(rid, &content); - printf("blob\nmark :%d\ndata %d\n", rid, blob_size(&content)); - bag_insert(&blobs, rid); - fwrite(blob_buffer(&content), 1, blob_size(&content), stdout); - printf("\n"); - blob_reset(&content); - } - db_finalize(&q); - - /* Output the commit records. 
- */ - db_prepare(&q, - "SELECT strftime('%%s',mtime), objid, coalesce(comment,ecomment)," - " coalesce(user,euser)," - " (SELECT value FROM tagxref WHERE rid=objid AND tagid=%d)" - " FROM event" - " WHERE type='ci'" - " ORDER BY mtime ASC", - TAG_BRANCH - ); - while( db_step(&q)==SQLITE_ROW ){ - sqlite3_int64 secondsSince1970 = db_column_int64(&q, 0); - int ckinId = db_column_int(&q, 1); - const char *zComment = db_column_text(&q, 2); - const char *zUser = db_column_text(&q, 3); - const char *zBranch = db_column_text(&q, 4); - char *zBr; - Manifest *p; - ManifestFile *pFile; - const char *zFromType; - - bag_insert(&vers, ckinId); - if( zBranch==0 ) zBranch = "trunk"; - zBr = mprintf("%s", zBranch); - for(i=0; zBr[i]; i++){ - if( !fossil_isalnum(zBr[i]) ) zBr[i] = '_'; - } - printf("commit refs/heads/%s\nmark :%d\n", zBr, ckinId); - free(zBr); - printf("committer"); - print_person(zUser); - printf(" %lld +0000\n", secondsSince1970); - if( zComment==0 ) zComment = "null comment"; - printf("data %d\n%s\n", (int)strlen(zComment), zComment); - p = manifest_get(ckinId, CFTYPE_ANY); - zFromType = "from"; - for(i=0; inParent; i++){ - int pid = fast_uuid_to_rid(p->azParent[i]); - if( pid==0 || !bag_find(&vers, pid) ) continue; - printf("%s :%d\n", zFromType, fast_uuid_to_rid(p->azParent[i])); - zFromType = "merge"; - } - printf("deleteall\n"); - manifest_file_rewind(p); - while( (pFile=manifest_file_next(p, 0))!=0 ){ - int fid = fast_uuid_to_rid(pFile->zUuid); - const char *zPerm = "100644"; - if( fid==0 ) continue; - if( pFile->zPerm && strstr(pFile->zPerm,"x") ) zPerm = "100755"; - if( !bag_find(&blobs, fid) ) continue; - printf("M %s :%d %s\n", zPerm, fid, pFile->zName); - } - manifest_destroy(p); - printf("\n"); - } - db_finalize(&q); - bag_clear(&blobs); - - - /* Output tags */ - db_prepare(&q, - "SELECT tagname, rid, strftime('%%s',mtime)" - " FROM tagxref JOIN tag USING(tagid)" - " WHERE tagtype=1 AND tagname GLOB 'sym-*'" - ); - while( db_step(&q)==SQLITE_ROW ){ - const char *zTagname = db_column_text(&q, 0); - int rid = db_column_int(&q, 1); - sqlite3_int64 secSince1970 = db_column_int64(&q, 2); - if( rid==0 || !bag_find(&vers, rid) ) continue; - zTagname += 4; - printf("tag %s\n", zTagname); - printf("from :%d\n", rid); - printf("tagger %lld +0000\n", secSince1970); - printf("data 0\n"); - } - db_finalize(&q); - bag_clear(&vers); -} Index: src/file.c ================================================================== --- src/file.c +++ src/file.c @@ -309,19 +309,16 @@ void cmd_test_canonical_name(void){ int i; Blob x; blob_zero(&x); for(i=2; i0 && g.zLocalRoot[n-1]=='/' ); - nFull = blob_size(&full); - zFull = blob_buffer(&full); - - /* Special case. zOrigName refers to g.zLocalRoot directory. */ - if( nFull==n-1 && memcmp(g.zLocalRoot, zFull, nFull)==0 ){ - blob_append(pOut, ".", 1); - return 1; - } - - if( nFull<=n || memcmp(g.zLocalRoot, zFull, n) ){ + if( blob_size(&full)<=n || memcmp(g.zLocalRoot, blob_buffer(&full), n) ){ blob_reset(&full); if( errFatal ){ fossil_fatal("file outside of checkout tree: %s", zOrigName); } return 0; } - blob_append(pOut, &zFull[n], nFull-n); + blob_zero(pOut); + blob_append(pOut, blob_buffer(&full)+n, blob_size(&full)-n); return 1; } /* ** COMMAND: test-tree-name Index: src/finfo.c ================================================================== --- src/finfo.c +++ src/finfo.c @@ -21,168 +21,75 @@ #include "finfo.h" /* ** COMMAND: finfo ** -** Usage: %fossil finfo {?-l|--log? 
/ -s|--status / --p|--print} FILENAME -** -** Print the complete change history for a single file going backwards -** in time. The default is -l. -** -** For the -l|--log option: If "-b|--brief" is specified one line per revision -** is printed, otherwise the full comment is printed. The "--limit N" -** and "--offset P" options limits the output to the first N changes -** after skipping P changes. -** -** In the -s form prints the status as . This is -** a quick status and does not check for up-to-date-ness of the file. -** -** The -p form, there's an optional flag "-r|--revision REVISION". The -** specified version (or the latest checked out version) is printed to -** stdout. +** Usage: %fossil finfo FILENAME +** +** Print the change history for a single file. +** +** The "--limit N" and "--offset P" options limit the output to the first +** N changes after skipping P changes. */ void finfo_cmd(void){ + Stmt q; int vid; + Blob dest; + const char *zFilename; + const char *zLimit; + const char *zOffset; + int iLimit, iOffset; db_must_be_within_tree(); vid = db_lget_int("checkout", 0); if( vid==0 ){ fossil_panic("no checkout to finfo files in"); } - vfile_check_signature(vid, 1); - if (find_option("status","s",0)) { - Stmt q; - Blob line; - Blob fname; - - if( g.argc!=3 ) usage("-s|--status FILENAME"); - file_tree_name(g.argv[2], &fname, 1); - db_prepare(&q, - "SELECT pathname, deleted, rid, chnged, coalesce(origname!=pathname,0)" - " FROM vfile WHERE vfile.pathname=%B", &fname); - blob_zero(&line); - if ( db_step(&q)==SQLITE_ROW ) { - Blob uuid; - int isDeleted = db_column_int(&q, 1); - int isNew = db_column_int(&q,2) == 0; - int chnged = db_column_int(&q,3); - int renamed = db_column_int(&q,4); - - blob_zero(&uuid); - db_blob(&uuid, - "SELECT uuid FROM blob, mlink, vfile WHERE " - "blob.rid = mlink.mid AND mlink.fid = vfile.rid AND " - "vfile.pathname=%B", - &fname - ); - if( isNew ){ - blob_appendf(&line, "new"); - }else if( isDeleted ){ - blob_appendf(&line, "deleted"); - }else if( renamed ){ - blob_appendf(&line, "renamed"); - }else if( chnged ){ - blob_appendf(&line, "edited"); - }else{ - blob_appendf(&line, "unchanged"); - } - blob_appendf(&line, " "); - blob_appendf(&line, " %10.10s", blob_str(&uuid)); - blob_reset(&uuid); - }else{ - blob_appendf(&line, "unknown 0000000000"); - } - db_finalize(&q); - printf("%s\n", blob_str(&line)); - blob_reset(&fname); - blob_reset(&line); - }else if( find_option("print","p",0) ){ - Blob record; - Blob fname; - const char *zRevision = find_option("revision", "r", 1); - - file_tree_name(g.argv[2], &fname, 1); - if( zRevision ){ - historical_version_of_file(zRevision, blob_str(&fname), &record, 0); - }else{ - int rid = db_int(0, "SELECT rid FROM vfile WHERE pathname=%B", &fname); - if( rid==0 ){ - fossil_fatal("no history for file: %b", &fname); - } - content_get(rid, &record); - } - blob_write_to_file(&record, "-"); - blob_reset(&record); - blob_reset(&fname); - }else{ - Blob line; - Stmt q; - Blob fname; - int rid; - const char *zFilename; - const char *zLimit; - const char *zOffset; - int iLimit, iOffset, iBrief; - - if( find_option("log","l",0) ){ - /* this is the default, no-op */ - } - zLimit = find_option("limit",0,1); - iLimit = zLimit ? atoi(zLimit) : -1; - zOffset = find_option("offset",0,1); - iOffset = zOffset ? atoi(zOffset) : 0; - iBrief = (find_option("brief","b",0) == 0); - if( g.argc!=3 ){ - usage("?-l|--log? ?-b|--brief? 
FILENAME"); - } - file_tree_name(g.argv[2], &fname, 1); - rid = db_int(0, "SELECT rid FROM vfile WHERE pathname=%B", &fname); - if( rid==0 ){ - fossil_fatal("no history for file: %b", &fname); - } - zFilename = blob_str(&fname); - db_prepare(&q, - "SELECT b.uuid, ci.uuid, date(event.mtime,'localtime')," - " coalesce(event.ecomment, event.comment)," - " coalesce(event.euser, event.user)" - " FROM mlink, blob b, event, blob ci" - " WHERE mlink.fnid=(SELECT fnid FROM filename WHERE name=%Q)" - " AND b.rid=mlink.fid" - " AND event.objid=mlink.mid" - " AND event.objid=ci.rid" - " ORDER BY event.mtime DESC LIMIT %d OFFSET %d", - zFilename, iLimit, iOffset - ); - blob_zero(&line); - if( iBrief ){ - printf("History of %s\n", blob_str(&fname)); - } - while( db_step(&q)==SQLITE_ROW ){ - const char *zFileUuid = db_column_text(&q, 0); - const char *zCiUuid = db_column_text(&q,1); - const char *zDate = db_column_text(&q, 2); - const char *zCom = db_column_text(&q, 3); - const char *zUser = db_column_text(&q, 4); - char *zOut; - if( iBrief ){ - printf("%s ", zDate); - zOut = sqlite3_mprintf("[%.10s] %s (user: %s, artifact: [%.10s])", - zCiUuid, zCom, zUser, zFileUuid); - comment_print(zOut, 11, 79); - sqlite3_free(zOut); - }else{ - blob_reset(&line); - blob_appendf(&line, "%.10s ", zCiUuid); - blob_appendf(&line, "%.10s ", zDate); - blob_appendf(&line, "%8.8s ", zUser); - blob_appendf(&line,"%-40.40s\n", zCom ); - comment_print(blob_str(&line), 0, 79); - } - } - db_finalize(&q); - blob_reset(&fname); - } + zLimit = find_option("limit",0,1); + iLimit = zLimit ? atoi(zLimit) : -1; + zOffset = find_option("offset",0,1); + iOffset = zOffset ? atoi(zOffset) : 0; + if (g.argc<3) { + usage("FILENAME"); + } + file_tree_name(g.argv[2], &dest, 1); + zFilename = blob_str(&dest); + db_prepare(&q, + "SELECT " + " (SELECT uuid FROM blob WHERE rid=mlink.fid)," /* New file */ + " (SELECT uuid FROM blob WHERE rid=mlink.mid)," /* The check-in */ + " date(event.mtime,'localtime')," + " coalesce(event.ecomment, event.comment)," + " coalesce(event.euser, event.user)" + " FROM mlink, event" + " WHERE mlink.fnid=(SELECT fnid FROM filename WHERE name=%Q)" + " AND event.objid=mlink.mid" + " ORDER BY event.mtime DESC LIMIT %d OFFSET %d /*sort*/", + zFilename, iLimit, iOffset + ); + + printf("History of %s\n", zFilename); + while( db_step(&q)==SQLITE_ROW ){ + const char *zFileUuid = db_column_text(&q, 0); + const char *zCiUuid = db_column_text(&q, 1); + const char *zDate = db_column_text(&q, 2); + const char *zCom = db_column_text(&q, 3); + const char *zUser = db_column_text(&q, 4); + char *zOut; + printf("%s ", zDate); + if( zFileUuid==0 ){ + zOut = sqlite3_mprintf("[%.10s] DELETED %s (user: %s)", + zCiUuid, zCom, zUser); + }else{ + zOut = sqlite3_mprintf("[%.10s] %s (user: %s, artifact: [%.10s])", + zCiUuid, zCom, zUser, zFileUuid); + } + comment_print(zOut, 11, 79); + sqlite3_free(zOut); + } + db_finalize(&q); + blob_reset(&dest); } /* ** WEBPAGE: finfo @@ -229,11 +136,11 @@ hyperlinked_path(zFilename, &title, 0); @

<h2>%b(&title)</h2>
blob_reset(&title); pGraph = graph_init(); @
- @ + @
while( db_step(&q)==SQLITE_ROW ){ const char *zDate = db_column_text(&q, 0); const char *zCom = db_column_text(&q, 1); const char *zUser = db_column_text(&q, 2); int fpid = db_column_int(&q, 3); Index: src/graph.c ================================================================== --- src/graph.c +++ src/graph.c @@ -299,11 +299,11 @@ } } /* Find the pChild pointer for each node. ** - ** The pChild points to the node directly above on the same rail. + ** The pChild points to node directly above on the same rail. ** The pChild must be in the same branch. Leaf nodes have a NULL ** pChild. ** ** In the case of a fork, choose the pChild that results in the ** longest rail. @@ -380,11 +380,11 @@ } pRow->iRail = findFreeRail(p, 0, pParent->idx, inUse, pParent->iRail); pParent->aiRaiser[pRow->iRail] = pRow->idx; } mask = 1<iRail; -/* if( pRow->pPrev ) pRow->pPrev->railInUse |= mask; */ + if( pRow->pPrev ) pRow->pPrev->railInUse |= mask; if( pRow->pNext ) pRow->pNext->railInUse |= mask; if( pRow->pChild==0 ){ inUse &= ~mask; }else{ inUse |= mask; Index: src/http.c ================================================================== --- src/http.c +++ src/http.c @@ -103,11 +103,11 @@ if( i>0 && g.urlPath[i-1]=='/' ){ zSep = ""; }else{ zSep = "/"; } - blob_appendf(pHdr, "POST %s%sxfer/xfer HTTP/1.0\r\n", g.urlPath, zSep); + blob_appendf(pHdr, "POST %s%sxfer HTTP/1.0\r\n", g.urlPath, zSep); if( g.urlProxyAuth ){ blob_appendf(pHdr, "Proxy-Authorization: %s\n", g.urlProxyAuth); } blob_appendf(pHdr, "Host: %s\r\n", g.urlHostname); blob_appendf(pHdr, "User-Agent: Fossil/" MANIFEST_VERSION "\r\n"); @@ -127,11 +127,11 @@ ** ** The server address is contain in the "g" global structure. The ** url_parse() routine should have been called prior to this routine ** in order to fill this structure appropriately. 
*/ -int http_exchange(Blob *pSend, Blob *pReply, int useLogin){ +void http_exchange(Blob *pSend, Blob *pReply, int useLogin){ Blob login; /* The login card */ Blob payload; /* The complete payload including login card */ Blob hdr; /* The HTTP request header */ int closeConnection; /* True to close the connection when done */ int iLength; /* Length of the reply payload */ @@ -140,12 +140,11 @@ char *zLine; /* A single line of the reply header */ int i; /* Loop counter */ int isError = 0; /* True if the reply is an error message */ if( transport_open() ){ - fossil_warning(transport_errmsg()); - return 1; + fossil_fatal(transport_errmsg()); } /* Construct the login card and prepare the complete payload */ blob_zero(&login); if( useLogin ) http_build_login_card(pSend, &login); @@ -202,11 +201,11 @@ if( sscanf(zLine, "HTTP/1.%d %d", &iHttpVersion, &rc)!=2 ) goto write_err; if( rc!=200 && rc!=302 ){ int ii; for(ii=7; zLine[ii] && zLine[ii]!=' '; ii++){} while( zLine[ii]==' ' ) ii++; - fossil_warning("server says: %s", &zLine[ii]); + fossil_fatal("server says: %s\n", &zLine[ii]); goto write_err; } if( iHttpVersion==0 ){ closeConnection = 1; }else{ @@ -227,24 +226,22 @@ }else if( rc==302 && strncasecmp(zLine, "location:", 9)==0 ){ int i, j; for(i=9; zLine[i] && zLine[i]==' '; i++){} if( zLine[i]==0 ) fossil_fatal("malformed redirect: %s", zLine); j = strlen(zLine) - 1; - while( j>4 && strcmp(&zLine[j-4],"/xfer")==0 ){ - j -= 4; - zLine[j] = 0; - } + if( j>4 && strcmp(&zLine[j-4],"/xfer")==0 ) zLine[j-4] = 0; fossil_print("redirect to %s\n", &zLine[i]); url_parse(&zLine[i]); transport_close(); - return http_exchange(pSend, pReply, useLogin); + http_exchange(pSend, pReply, useLogin); + return; }else if( strncasecmp(zLine, "content-type: text/html", 23)==0 ){ isError = 1; } } if( rc!=200 ){ - fossil_warning("\"location:\" missing from 302 redirect reply"); + fossil_fatal("\"location:\" missing from 302 redirect reply"); goto write_err; } /* ** Extract the reply payload that follows the header @@ -288,14 +285,14 @@ if( closeConnection ){ transport_close(); }else{ transport_rewind(); } - return 0; + return; /* ** Jump to here if an error is seen. */ write_err: transport_close(); - return 1; + return; } Index: src/http_socket.c ================================================================== --- src/http_socket.c +++ src/http_socket.c @@ -152,11 +152,11 @@ if( pHost!=0 ){ memcpy(&addr.sin_addr,pHost->h_addr_list[0],pHost->h_length); }else #endif { - socket_set_errmsg("can't resolve host name: %s", g.urlName); + socket_set_errmsg("can't resolve host name: %s\n", g.urlName); return 1; } } addrIsInit = 1; Index: src/http_transport.c ================================================================== --- src/http_transport.c +++ src/http_transport.c @@ -294,13 +294,13 @@ fprintf(sshOut, "\n\n"); }else if( g.urlIsFile ){ char *zCmd; fclose(transport.pFile); zCmd = mprintf("\"%s\" http \"%s\" \"%s\" \"%s\" 127.0.0.1", - fossil_nameofexe(), g.urlName, transport.zOutFile, transport.zInFile + g.argv[0], g.urlName, transport.zOutFile, transport.zInFile ); - fossil_system(zCmd); + portable_system(zCmd); free(zCmd); transport.pFile = fopen(transport.zInFile, "rb"); } } DELETED src/import.c Index: src/import.c ================================================================== --- src/import.c +++ /dev/null @@ -1,624 +0,0 @@ -/* -** Copyright (c) 2010 D. 
Richard Hipp -** -** This program is free software; you can redistribute it and/or -** modify it under the terms of the Simplified BSD License (also -** known as the "2-Clause License" or "FreeBSD License".) - -** This program is distributed in the hope that it will be useful, -** but without any warranty; without even the implied warranty of -** merchantability or fitness for a particular purpose. -** -** Author contact information: -** drh@sqlite.org -** -******************************************************************************* -** -** This file contains code used to import the content of a Git -** repository in the git-fast-import format as a new Fossil -** repository. -*/ -#include "config.h" -#include "import.h" -#include - -#if INTERFACE -/* -** A single file change record. -*/ -struct ImportFile { - char *zName; /* Name of a file */ - char *zUuid; /* UUID of the file */ - char *zPrior; /* Prior name if the name was changed */ - char isFrom; /* True if obtained from the parent */ - char isExe; /* True if executable */ -}; -#endif - - -/* -** State information about an on-going fast-import parse. -*/ -static struct { - void (*xFinish)(void); /* Function to finish a prior record */ - int nData; /* Bytes of data */ - char *zTag; /* Name of a tag */ - char *zBranch; /* Name of a branch for a commit */ - char *aData; /* Data content */ - char *zMark; /* The current mark */ - char *zDate; /* Date/time stamp */ - char *zUser; /* User name */ - char *zComment; /* Comment of a commit */ - char *zFrom; /* from value as a UUID */ - char *zFromMark; /* The mark of the "from" field */ - int nMerge; /* Number of merge values */ - int nMergeAlloc; /* Number of slots in azMerge[] */ - char **azMerge; /* Merge values */ - int nFile; /* Number of aFile values */ - int nFileAlloc; /* Number of slots in aFile[] */ - ImportFile *aFile; /* Information about files in a commit */ - int fromLoaded; /* True zFrom content loaded into aFile[] */ -} gg; - -/* -** A no-op "xFinish" method -*/ -static void finish_noop(void){} - -/* -** Deallocate the state information. -** -** The azMerge[] and aFile[] arrays are zeroed by allocated space is -** retained unless the freeAll flag is set. -*/ -static void import_reset(int freeAll){ - int i; - gg.xFinish = 0; - fossil_free(gg.zTag); gg.zTag = 0; - fossil_free(gg.zBranch); gg.zBranch = 0; - fossil_free(gg.aData); gg.aData = 0; - fossil_free(gg.zMark); gg.zMark = 0; - fossil_free(gg.zDate); gg.zDate = 0; - fossil_free(gg.zUser); gg.zUser = 0; - fossil_free(gg.zComment); gg.zComment = 0; - fossil_free(gg.zFrom); gg.zFrom = 0; - fossil_free(gg.zFromMark); gg.zFromMark = 0; - for(i=0; izName, pB->zName); -} - -/* Forward reference */ -static void import_prior_files(void); - -/* -** Use data accumulated in gg from a "commit" record to add a new -** manifest artifact to the BLOB table. 
-*/ -static void finish_commit(void){ - int i; - char *zFromBranch; - Blob record, cksum; - import_prior_files(); - qsort(gg.aFile, gg.nFile, sizeof(gg.aFile[0]), mfile_cmp); - blob_zero(&record); - blob_appendf(&record, "C %F\n", gg.zComment); - blob_appendf(&record, "D %s\n", gg.zDate); - for(i=0; i=gg.nFileAlloc ){ - gg.nFileAlloc = gg.nFileAlloc*2 + 100; - gg.aFile = fossil_realloc(gg.aFile, gg.nFileAlloc*sizeof(gg.aFile[0])); - } - pFile = &gg.aFile[gg.nFile++]; - memset(pFile, 0, sizeof(*pFile)); - return pFile; -} - - -/* -** Load all file information out of the gg.zFrom check-in -*/ -static void import_prior_files(void){ - Manifest *p; - int rid; - ManifestFile *pOld; - ImportFile *pNew; - if( gg.fromLoaded ) return; - gg.fromLoaded = 1; - if( gg.zFrom==0 ) return; - rid = fast_uuid_to_rid(gg.zFrom); - if( rid==0 ) return; - p = manifest_get(rid, CFTYPE_MANIFEST); - if( p==0 ) return; - manifest_file_rewind(p); - while( (pOld = manifest_file_next(p, 0))!=0 ){ - pNew = import_add_file(); - pNew->zName = import_strdup(pOld->zName); - pNew->isExe = pOld->zPerm && strstr(pOld->zPerm, "x")!=0; - pNew->zUuid = import_strdup(pOld->zUuid); - pNew->isFrom = 1; - } - manifest_destroy(p); -} - -/* -** Locate a file in the gg.aFile[] array by its name. Begin the search -** with the *pI-th file. Update *pI to be one past the file found. -** Do not search past the mx-th file. -*/ -static ImportFile *import_find_file(const char *zName, int *pI, int mx){ - int i = *pI; - int nName = strlen(zName); - while( i=0 && z[i]!='/'; i--){} - if( z[i+1]!=0 ) z += i+1; - gg.zBranch = import_strdup(z); - gg.fromLoaded = 0; - }else - if( memcmp(zLine, "tag ", 4)==0 ){ - gg.xFinish(); - gg.xFinish = finish_tag; - trim_newline(&zLine[4]); - gg.zTag = import_strdup(&zLine[4]); - }else - if( memcmp(zLine, "reset ", 4)==0 ){ - gg.xFinish(); - }else - if( memcmp(zLine, "checkpoint", 10)==0 ){ - gg.xFinish(); - }else - if( memcmp(zLine, "feature", 7)==0 ){ - gg.xFinish(); - }else - if( memcmp(zLine, "option", 6)==0 ){ - gg.xFinish(); - }else - if( memcmp(zLine, "progress ", 9)==0 ){ - gg.xFinish(); - trim_newline(&zLine[9]); - printf("%s\n", &zLine[9]); - fflush(stdout); - }else - if( memcmp(zLine, "data ", 5)==0 ){ - fossil_free(gg.aData); gg.aData = 0; - gg.nData = atoi(&zLine[5]); - if( gg.nData ){ - int got; - gg.aData = fossil_malloc( gg.nData+1 ); - got = fread(gg.aData, 1, gg.nData, pIn); - if( got!=gg.nData ){ - fossil_fatal("short read: got %d of %d bytes", got, gg.nData); - } - gg.aData[got] = 0; - if( gg.zComment==0 && gg.xFinish==finish_commit ){ - gg.zComment = gg.aData; - gg.aData = 0; - gg.nData = 0; - } - } - }else - if( memcmp(zLine, "author ", 7)==0 ){ - /* No-op */ - }else - if( memcmp(zLine, "mark ", 5)==0 ){ - trim_newline(&zLine[5]); - fossil_free(gg.zMark); - gg.zMark = import_strdup(&zLine[5]); - }else - if( memcmp(zLine, "tagger ", 7)==0 || memcmp(zLine, "committer ",10)==0 ){ - sqlite3_int64 secSince1970; - for(i=0; zLine[i] && zLine[i]!='<'; i++){} - if( zLine[i]==0 ) goto malformed_line; - z = &zLine[i+1]; - for(i=i+1; zLine[i] && zLine[i]!='>'; i++){} - if( zLine[i]==0 ) goto malformed_line; - zLine[i] = 0; - fossil_free(gg.zUser); - gg.zUser = import_strdup(z); - secSince1970 = 0; - for(i=i+2; fossil_isdigit(zLine[i]); i++){ - secSince1970 = secSince1970*10 + zLine[i] - '0'; - } - fossil_free(gg.zDate); - gg.zDate = db_text(0, "SELECT datetime(%lld, 'unixepoch')", secSince1970); - gg.zDate[10] = 'T'; - }else - if( memcmp(zLine, "from ", 5)==0 ){ - trim_newline(&zLine[5]); - 
fossil_free(gg.zFromMark); - gg.zFromMark = import_strdup(&zLine[5]); - fossil_free(gg.zFrom); - gg.zFrom = resolve_committish(&zLine[5]); - }else - if( memcmp(zLine, "merge ", 6)==0 ){ - trim_newline(&zLine[6]); - if( gg.nMerge>=gg.nMergeAlloc ){ - gg.nMergeAlloc = gg.nMergeAlloc*2 + 10; - gg.azMerge = fossil_realloc(gg.azMerge, gg.nMergeAlloc*sizeof(char*)); - } - gg.azMerge[gg.nMerge] = resolve_committish(&zLine[6]); - if( gg.azMerge[gg.nMerge] ) gg.nMerge++; - }else - if( memcmp(zLine, "M ", 2)==0 ){ - import_prior_files(); - z = &zLine[2]; - zPerm = next_token(&z); - zUuid = next_token(&z); - zName = next_token(&z); - i = 0; - pFile = import_find_file(zName, &i, gg.nFile); - if( pFile==0 ){ - pFile = import_add_file(); - pFile->zName = import_strdup(zName); - } - pFile->isExe = (strcmp(zPerm, "100755")==0); - fossil_free(pFile->zUuid); - pFile->zUuid = resolve_committish(zUuid); - pFile->isFrom = 0; - }else - if( memcmp(zLine, "D ", 2)==0 ){ - import_prior_files(); - z = &zLine[2]; - zName = next_token(&z); - i = 0; - while( (pFile = import_find_file(zName, &i, gg.nFile))!=0 ){ - if( pFile->isFrom==0 ) continue; - fossil_free(pFile->zName); - fossil_free(pFile->zPrior); - fossil_free(pFile->zUuid); - *pFile = gg.aFile[--gg.nFile]; - i--; - } - }else - if( memcmp(zLine, "C ", 2)==0 ){ - int nFrom; - import_prior_files(); - z = &zLine[2]; - zFrom = next_token(&z); - zTo = next_token(&z); - i = 0; - mx = gg.nFile; - nFrom = strlen(zFrom); - while( (pFile = import_find_file(zFrom, &i, mx))!=0 ){ - if( pFile->isFrom==0 ) continue; - pNew = import_add_file(); - pFile = &gg.aFile[i-1]; - if( strlen(pFile->zName)>nFrom ){ - pNew->zName = mprintf("%s%s", zTo, pFile->zName[nFrom]); - }else{ - pNew->zName = import_strdup(pFile->zName); - } - pNew->isExe = pFile->isExe; - pNew->zUuid = import_strdup(pFile->zUuid); - pNew->isFrom = 0; - } - }else - if( memcmp(zLine, "R ", 2)==0 ){ - int nFrom; - import_prior_files(); - z = &zLine[2]; - zFrom = next_token(&z); - zTo = next_token(&z); - i = 0; - nFrom = strlen(zFrom); - while( (pFile = import_find_file(zFrom, &i, gg.nFile))!=0 ){ - if( pFile->isFrom==0 ) continue; - pNew = import_add_file(); - pFile = &gg.aFile[i-1]; - if( strlen(pFile->zName)>nFrom ){ - pNew->zName = mprintf("%s%s", zTo, pFile->zName[nFrom]); - }else{ - pNew->zName = import_strdup(pFile->zName); - } - pNew->zPrior = pFile->zName; - pNew->isExe = pFile->isExe; - pNew->zUuid = pFile->zUuid; - pNew->isFrom = 0; - gg.nFile--; - *pFile = *pNew; - memset(pNew, 0, sizeof(*pNew)); - } - fossil_fatal("cannot handle R records, use --full-tree"); - }else - if( memcmp(zLine, "deleteall", 9)==0 ){ - gg.fromLoaded = 1; - }else - if( memcmp(zLine, "N ", 2)==0 ){ - /* No-op */ - }else - - { - goto malformed_line; - } - } - gg.xFinish(); - import_reset(1); - return; - -malformed_line: - trim_newline(zLine); - fossil_fatal("bad fast-import line: [%s]", zLine); - return; -} - -/* -** COMMAND: import -** -** Usage: %fossil import NEW-REPOSITORY -** -** Read text generated by the git-fast-export command and use it to -** construct a new Fossil repository named by the NEW-REPOSITORY -** argument. The get-fast-export text is read from standard input. 
-*/ -void git_import_cmd(void){ - char *zPassword; - FILE *pIn; - int forceFlag = find_option("force", "f", 0)!=0; - if( g.argc!=3 && g.argc!=4 ){ - usage("REPOSITORY-NAME"); - } - if( g.argc==4 ){ - pIn = fopen(g.argv[3], "rb"); - }else{ - pIn = stdin; - fossil_binary_mode(pIn); - } - if( forceFlag ) unlink(g.argv[2]); - db_create_repository(g.argv[2]); - db_open_repository(g.argv[2]); - db_open_config(0); - db_multi_exec( - "CREATE TEMP TABLE xtag(tname TEXT UNIQUE, trid INT, tuuid TEXT);" - "CREATE TEMP TABLE xbranch(tname TEXT UNIQUE, brnm TEXT);" - ); - db_begin_transaction(); - db_initial_setup(0, 0, 1); - git_fast_import(pIn); - db_end_transaction(0); - db_begin_transaction(); - printf("Rebuilding repository meta-data...\n"); - rebuild_db(0, 1); - verify_cancel(); - db_end_transaction(0); - printf("Vacuuming..."); fflush(stdout); - db_multi_exec("VACUUM"); - printf(" ok\n"); - printf("project-id: %s\n", db_get("project-code", 0)); - printf("server-id: %s\n", db_get("server-code", 0)); - zPassword = db_text(0, "SELECT pw FROM user WHERE login=%Q", g.zLogin); - printf("admin-user: %s (password is \"%s\")\n", g.zLogin, zPassword); -} Index: src/info.c ================================================================== --- src/info.c +++ src/info.c @@ -49,16 +49,11 @@ ** * The UUID ** * The record ID ** * mtime and ctime ** * who signed it */ -void show_common_info( - int rid, /* The rid for the check-in to display info for */ - const char *zUuidName, /* Name of the UUID */ - int showComment, /* True to show the check-in comment */ - int showFamily /* True to show parents and children */ -){ +void show_common_info(int rid, const char *zUuidName, int showComment){ Stmt q; char *zComment = 0; char *zTags; char *zDate; char *zUuid; @@ -73,42 +68,38 @@ free(zUuid); free(zDate); } if( zUuid && showComment ){ zComment = db_text(0, - "SELECT coalesce(ecomment,comment) || " - " ' (user: ' || coalesce(euser,user,'?') || ')' " - " FROM event WHERE objid=%d", + "SELECT coalesce(ecomment,comment) || ' (user: ' || coalesce(euser,user,'?') || ')' FROM event WHERE objid=%d", rid ); } - if( showFamily ){ - db_prepare(&q, "SELECT uuid, pid FROM plink JOIN blob ON pid=rid " - " WHERE cid=%d", rid); - while( db_step(&q)==SQLITE_ROW ){ - const char *zUuid = db_column_text(&q, 0); - zDate = db_text("", - "SELECT datetime(mtime) || ' UTC' FROM event WHERE objid=%d", - db_column_int(&q, 1) - ); - printf("parent: %s %s\n", zUuid, zDate); - free(zDate); - } - db_finalize(&q); - db_prepare(&q, "SELECT uuid, cid FROM plink JOIN blob ON cid=rid " - " WHERE pid=%d", rid); - while( db_step(&q)==SQLITE_ROW ){ - const char *zUuid = db_column_text(&q, 0); - zDate = db_text("", - "SELECT datetime(mtime) || ' UTC' FROM event WHERE objid=%d", - db_column_int(&q, 1) - ); - printf("child: %s %s\n", zUuid, zDate); - free(zDate); - } - db_finalize(&q); - } + db_prepare(&q, "SELECT uuid, pid FROM plink JOIN blob ON pid=rid " + " WHERE cid=%d", rid); + while( db_step(&q)==SQLITE_ROW ){ + const char *zUuid = db_column_text(&q, 0); + zDate = db_text("", + "SELECT datetime(mtime) || ' UTC' FROM event WHERE objid=%d", + db_column_int(&q, 1) + ); + printf("parent: %s %s\n", zUuid, zDate); + free(zDate); + } + db_finalize(&q); + db_prepare(&q, "SELECT uuid, cid FROM plink JOIN blob ON cid=rid " + " WHERE pid=%d", rid); + while( db_step(&q)==SQLITE_ROW ){ + const char *zUuid = db_column_text(&q, 0); + zDate = db_text("", + "SELECT datetime(mtime) || ' UTC' FROM event WHERE objid=%d", + db_column_int(&q, 1) + ); + printf("child: %s 
%s\n", zUuid, zDate); + free(zDate); + } + db_finalize(&q); zTags = info_tags_of_checkin(rid, 0); if( zTags && zTags[0] ){ printf("tags: %s\n", zTags); } free(zTags); @@ -162,19 +153,19 @@ printf("server-code: %s\n", db_get("server-code", "")); vid = db_lget_int("checkout", 0); if( vid==0 ){ printf("checkout: nil\n"); }else{ - show_common_info(vid, "checkout:", 1, 1); + show_common_info(vid, "checkout:", 1); } }else{ int rid; rid = name_to_rid(g.argv[2]); if( rid==0 ){ fossil_panic("no such object: %s\n", g.argv[2]); } - show_common_info(rid, "uuid:", 1, 1); + show_common_info(rid, "uuid:", 1); } } /* ** Show information about all tags on a given node. Index: src/login.c ================================================================== --- src/login.c +++ src/login.c @@ -51,14 +51,15 @@ ** Return the name of the login cookie */ static char *login_cookie_name(void){ static char *zCookieName = 0; if( zCookieName==0 ){ - unsigned int h = 0; - const char *z = g.zBaseURL; - while( *z ){ h = (h<<3) ^ (h>>26) ^ *(z++); } - zCookieName = mprintf("fossil_login_%08x", h); + int n = strlen(g.zTop); + zCookieName = fossil_malloc( n*2+16 ); + /* 0123456789 12345 */ + strcpy(zCookieName, "fossil_login_"); + encode16((unsigned char*)g.zTop, (unsigned char*)&zCookieName[13], n); } return zCookieName; } /* @@ -458,11 +459,11 @@ if( g.zLogin && once ){ const char *zCap; /* All logged-in users inherit privileges from "nobody" */ zCap = db_text("", "SELECT cap FROM user WHERE login = 'nobody'"); login_set_capabilities(zCap); - if( strcmp(g.zLogin, "nobody")!=0 ){ + if( strcmp(g.zLogin, "anonymous")!=0 ){ /* All logged-in users inherit privileges from "anonymous" */ zCap = db_text("", "SELECT cap FROM user WHERE login = 'anonymous'"); login_set_capabilities(zCap); } once = 0; Index: src/main.c ================================================================== --- src/main.c +++ src/main.c @@ -228,11 +228,11 @@ sqlite3_config(SQLITE_CONFIG_LOG, fossil_sqlite_log, 0); g.now = time(0); g.argc = argc; g.argv = argv; - if( getenv("GATEWAY_INTERFACE")!=0 && !find_option("nocgi", 0, 0)){ + if( getenv("GATEWAY_INTERFACE")!=0 ){ zCmdName = "cgi"; }else if( argc<2 ){ fprintf(stderr, "Usage: %s COMMAND ...\n" "\"%s help\" for a list of available commands\n" "\"%s help COMMAND\" for specific details\n", @@ -269,21 +269,10 @@ ** or a panic. If additional "recursive-fatal" errors occur while ** shutting down, the recursive errors are silently ignored. */ static int mainInFatalError = 0; -/* -** Return the name of the current executable. -*/ -const char *fossil_nameofexe(void){ -#ifdef _WIN32 - return _pgmptr; -#else - return g.argv[0]; -#endif -} - /* ** Exit. Take care to close the database first. */ void fossil_exit(int rc){ db_close(); @@ -305,11 +294,11 @@ if( g.cgiOutput && once ){ once = 0; cgi_printf("

<p class=\"generalError\">%h</p>
", z); cgi_reply(); }else{ - fprintf(stderr, "%s: %s\n", fossil_nameofexe(), z); + fprintf(stderr, "%s: %s\n", g.argv[0], z); } db_force_rollback(); fossil_exit(1); } void fossil_fatal(const char *zFormat, ...){ @@ -322,11 +311,11 @@ if( g.cgiOutput ){ g.cgiOutput = 0; cgi_printf("

<p class=\"generalError\">%h</p>
", z); cgi_reply(); }else{ - fprintf(stderr, "\r%s: %s\n", fossil_nameofexe(), z); + fprintf(stderr, "%s: %s\n", g.argv[0], z); } db_force_rollback(); fossil_exit(1); } @@ -350,11 +339,11 @@ if( g.cgiOutput ){ g.cgiOutput = 0; cgi_printf("

<p class=\"generalError\">%h</p>
", z); cgi_reply(); }else{ - fprintf(stderr, "\r%s: %s\n", fossil_nameofexe(), z); + fprintf(stderr, "%s: %s\n", g.argv[0], z); } db_force_rollback(); fossil_exit(1); } @@ -367,11 +356,11 @@ z = vmprintf(zFormat, ap); va_end(ap); if( g.cgiOutput ){ cgi_printf("

<p class=\"generalError\">%h</p>
", z); }else{ - fprintf(stderr, "\r%s: %s\n", fossil_nameofexe(), z); + fprintf(stderr, "%s: %s\n", g.argv[0], z); } } /* ** Malloc and free routines that cannot fail @@ -387,41 +376,10 @@ void *fossil_realloc(void *p, size_t n){ p = realloc(p, n); if( p==0 ) fossil_panic("out of memory"); return p; } - -/* -** This function implements a cross-platform "system()" interface. -*/ -int fossil_system(const char *zOrigCmd){ - int rc; -#if defined(_WIN32) - /* On windows, we have to put double-quotes around the entire command. - ** Who knows why - this is just the way windows works. - */ - char *zNewCmd = mprintf("\"%s\"", zOrigCmd); - rc = system(zNewCmd); - free(zNewCmd); -#else - /* On unix, evaluate the command directly. - */ - rc = system(zOrigCmd); -#endif - return rc; -} - -/* -** Turn off any NL to CRNL translation on the stream given as an -** argument. This is a no-op on unix but is necessary on windows. -*/ -void fossil_binary_mode(FILE *p){ -#if defined(_WIN32) - _setmode(_fileno(p), _O_BINARY); -#endif -} - /* ** Return a name for an SQLite error code */ @@ -464,11 +422,11 @@ /* ** Print a usage comment and quit */ void usage(const char *zFormat){ - fprintf(stderr, "Usage: %s %s %s\n", fossil_nameofexe(), g.argv[1], zFormat); + fprintf(stderr, "Usage: %s %s %s\n", g.argv[0], g.argv[1], zFormat); fossil_exit(1); } /* ** Remove n elements from g.argv beginning with the i-th element. @@ -624,12 +582,11 @@ */ void help_cmd(void){ int rc, idx; const char *z; if( g.argc!=3 ){ - printf("Usage: %s help COMMAND.\nAvailable COMMANDs:\n", - fossil_nameofexe()); + printf("Usage: %s help COMMAND.\nAvailable COMMANDs:\n", g.argv[0]); cmd_cmd_list(); version_cmd(); return; } rc = name_search(g.argv[2], aCommand, count(aCommand), &idx); @@ -643,11 +600,11 @@ fossil_fatal("no help available for the %s command", aCommand[idx].zName); } while( *z ){ if( *z=='%' && strncmp(z, "%fossil", 7)==0 ){ - printf("%s", fossil_nameofexe()); + printf("%s", g.argv[0]); z += 7; }else{ putchar(*z); z++; } Index: src/main.mk ================================================================== --- src/main.mk +++ src/main.mk @@ -36,19 +36,17 @@ $(SRCDIR)/diff.c \ $(SRCDIR)/diffcmd.c \ $(SRCDIR)/doc.c \ $(SRCDIR)/encode.c \ $(SRCDIR)/event.c \ - $(SRCDIR)/export.c \ $(SRCDIR)/file.c \ $(SRCDIR)/finfo.c \ $(SRCDIR)/graph.c \ $(SRCDIR)/http.c \ $(SRCDIR)/http_socket.c \ $(SRCDIR)/http_ssl.c \ $(SRCDIR)/http_transport.c \ - $(SRCDIR)/import.c \ $(SRCDIR)/info.c \ $(SRCDIR)/login.c \ $(SRCDIR)/main.c \ $(SRCDIR)/manifest.c \ $(SRCDIR)/md5.c \ @@ -112,19 +110,17 @@ diff_.c \ diffcmd_.c \ doc_.c \ encode_.c \ event_.c \ - export_.c \ file_.c \ finfo_.c \ graph_.c \ http_.c \ http_socket_.c \ http_ssl_.c \ http_transport_.c \ - import_.c \ info_.c \ login_.c \ main_.c \ manifest_.c \ md5_.c \ @@ -188,19 +184,17 @@ $(OBJDIR)/diff.o \ $(OBJDIR)/diffcmd.o \ $(OBJDIR)/doc.o \ $(OBJDIR)/encode.o \ $(OBJDIR)/event.o \ - $(OBJDIR)/export.o \ $(OBJDIR)/file.o \ $(OBJDIR)/finfo.o \ $(OBJDIR)/graph.o \ $(OBJDIR)/http.o \ $(OBJDIR)/http_socket.o \ $(OBJDIR)/http_ssl.o \ $(OBJDIR)/http_transport.o \ - $(OBJDIR)/import.o \ $(OBJDIR)/info.o \ $(OBJDIR)/login.o \ $(OBJDIR)/main.o \ $(OBJDIR)/manifest.o \ $(OBJDIR)/md5.o \ @@ -284,16 +278,16 @@ # noop clean: rm -f $(OBJDIR)/*.o *_.c $(APPNAME) VERSION.h rm -f translate makeheaders mkindex page_index.h headers - rm -f add.h allrepo.h attach.h bag.h blob.h branch.h browse.h captcha.h cgi.h checkin.h checkout.h clearsign.h clone.h comformat.h configure.h content.h db.h delta.h deltacmd.h 
descendants.h diff.h diffcmd.h doc.h encode.h event.h export.h file.h finfo.h graph.h http.h http_socket.h http_ssl.h http_transport.h import.h info.h login.h main.h manifest.h md5.h merge.h merge3.h name.h pivot.h popen.h pqueue.h printf.h rebuild.h report.h rss.h schema.h search.h setup.h sha1.h shun.h skins.h stat.h style.h sync.h tag.h th_main.h timeline.h tkt.h tktsetup.h undo.h update.h url.h user.h verify.h vfile.h wiki.h wikiformat.h winhttp.h xfer.h zip.h + rm -f add.h allrepo.h attach.h bag.h blob.h branch.h browse.h captcha.h cgi.h checkin.h checkout.h clearsign.h clone.h comformat.h configure.h content.h db.h delta.h deltacmd.h descendants.h diff.h diffcmd.h doc.h encode.h event.h file.h finfo.h graph.h http.h http_socket.h http_ssl.h http_transport.h info.h login.h main.h manifest.h md5.h merge.h merge3.h name.h pivot.h popen.h pqueue.h printf.h rebuild.h report.h rss.h schema.h search.h setup.h sha1.h shun.h skins.h stat.h style.h sync.h tag.h th_main.h timeline.h tkt.h tktsetup.h undo.h update.h url.h user.h verify.h vfile.h wiki.h wikiformat.h winhttp.h xfer.h zip.h page_index.h: $(TRANS_SRC) mkindex ./mkindex $(TRANS_SRC) >$@ headers: page_index.h makeheaders VERSION.h - ./makeheaders add_.c:add.h allrepo_.c:allrepo.h attach_.c:attach.h bag_.c:bag.h blob_.c:blob.h branch_.c:branch.h browse_.c:browse.h captcha_.c:captcha.h cgi_.c:cgi.h checkin_.c:checkin.h checkout_.c:checkout.h clearsign_.c:clearsign.h clone_.c:clone.h comformat_.c:comformat.h configure_.c:configure.h content_.c:content.h db_.c:db.h delta_.c:delta.h deltacmd_.c:deltacmd.h descendants_.c:descendants.h diff_.c:diff.h diffcmd_.c:diffcmd.h doc_.c:doc.h encode_.c:encode.h event_.c:event.h export_.c:export.h file_.c:file.h finfo_.c:finfo.h graph_.c:graph.h http_.c:http.h http_socket_.c:http_socket.h http_ssl_.c:http_ssl.h http_transport_.c:http_transport.h import_.c:import.h info_.c:info.h login_.c:login.h main_.c:main.h manifest_.c:manifest.h md5_.c:md5.h merge_.c:merge.h merge3_.c:merge3.h name_.c:name.h pivot_.c:pivot.h popen_.c:popen.h pqueue_.c:pqueue.h printf_.c:printf.h rebuild_.c:rebuild.h report_.c:report.h rss_.c:rss.h schema_.c:schema.h search_.c:search.h setup_.c:setup.h sha1_.c:sha1.h shun_.c:shun.h skins_.c:skins.h stat_.c:stat.h style_.c:style.h sync_.c:sync.h tag_.c:tag.h th_main_.c:th_main.h timeline_.c:timeline.h tkt_.c:tkt.h tktsetup_.c:tktsetup.h undo_.c:undo.h update_.c:update.h url_.c:url.h user_.c:user.h verify_.c:verify.h vfile_.c:vfile.h wiki_.c:wiki.h wikiformat_.c:wikiformat.h winhttp_.c:winhttp.h xfer_.c:xfer.h zip_.c:zip.h $(SRCDIR)/sqlite3.h $(SRCDIR)/th.h VERSION.h + ./makeheaders add_.c:add.h allrepo_.c:allrepo.h attach_.c:attach.h bag_.c:bag.h blob_.c:blob.h branch_.c:branch.h browse_.c:browse.h captcha_.c:captcha.h cgi_.c:cgi.h checkin_.c:checkin.h checkout_.c:checkout.h clearsign_.c:clearsign.h clone_.c:clone.h comformat_.c:comformat.h configure_.c:configure.h content_.c:content.h db_.c:db.h delta_.c:delta.h deltacmd_.c:deltacmd.h descendants_.c:descendants.h diff_.c:diff.h diffcmd_.c:diffcmd.h doc_.c:doc.h encode_.c:encode.h event_.c:event.h file_.c:file.h finfo_.c:finfo.h graph_.c:graph.h http_.c:http.h http_socket_.c:http_socket.h http_ssl_.c:http_ssl.h http_transport_.c:http_transport.h info_.c:info.h login_.c:login.h main_.c:main.h manifest_.c:manifest.h md5_.c:md5.h merge_.c:merge.h merge3_.c:merge3.h name_.c:name.h pivot_.c:pivot.h popen_.c:popen.h pqueue_.c:pqueue.h printf_.c:printf.h rebuild_.c:rebuild.h report_.c:report.h rss_.c:rss.h schema_.c:schema.h 
search_.c:search.h setup_.c:setup.h sha1_.c:sha1.h shun_.c:shun.h skins_.c:skins.h stat_.c:stat.h style_.c:style.h sync_.c:sync.h tag_.c:tag.h th_main_.c:th_main.h timeline_.c:timeline.h tkt_.c:tkt.h tktsetup_.c:tktsetup.h undo_.c:undo.h update_.c:update.h url_.c:url.h user_.c:user.h verify_.c:verify.h vfile_.c:vfile.h wiki_.c:wiki.h wikiformat_.c:wikiformat.h winhttp_.c:winhttp.h xfer_.c:xfer.h zip_.c:zip.h $(SRCDIR)/sqlite3.h $(SRCDIR)/th.h VERSION.h touch headers headers: Makefile Makefile: add_.c: $(SRCDIR)/add.c translate ./translate $(SRCDIR)/add.c >add_.c @@ -468,17 +462,10 @@ $(OBJDIR)/event.o: event_.c event.h $(SRCDIR)/config.h $(XTCC) -o $(OBJDIR)/event.o -c event_.c event.h: headers -export_.c: $(SRCDIR)/export.c translate - ./translate $(SRCDIR)/export.c >export_.c - -$(OBJDIR)/export.o: export_.c export.h $(SRCDIR)/config.h - $(XTCC) -o $(OBJDIR)/export.o -c export_.c - -export.h: headers file_.c: $(SRCDIR)/file.c translate ./translate $(SRCDIR)/file.c >file_.c $(OBJDIR)/file.o: file_.c file.h $(SRCDIR)/config.h $(XTCC) -o $(OBJDIR)/file.o -c file_.c @@ -524,17 +511,10 @@ $(OBJDIR)/http_transport.o: http_transport_.c http_transport.h $(SRCDIR)/config.h $(XTCC) -o $(OBJDIR)/http_transport.o -c http_transport_.c http_transport.h: headers -import_.c: $(SRCDIR)/import.c translate - ./translate $(SRCDIR)/import.c >import_.c - -$(OBJDIR)/import.o: import_.c import.h $(SRCDIR)/config.h - $(XTCC) -o $(OBJDIR)/import.o -c import_.c - -import.h: headers info_.c: $(SRCDIR)/info.c translate ./translate $(SRCDIR)/info.c >info_.c $(OBJDIR)/info.o: info_.c info.h $(SRCDIR)/config.h $(XTCC) -o $(OBJDIR)/info.o -c info_.c Index: src/makemake.tcl ================================================================== --- src/makemake.tcl +++ src/makemake.tcl @@ -30,18 +30,16 @@ diff diffcmd doc encode event - export file finfo graph http http_socket http_transport - import info login main manifest md5 Index: src/manifest.c ================================================================== --- src/manifest.c +++ src/manifest.c @@ -104,15 +104,10 @@ int nxAge; int aAge[MX_MANIFEST_CACHE]; Manifest *apManifest[MX_MANIFEST_CACHE]; } manifestCache; -/* -** True if manifest_crosslink_begin() has been called but -** manifest_crosslink_end() is still pending. -*/ -static int manifest_crosslink_busy = 0; /* ** Clear the memory allocated in a manifest object */ void manifest_destroy(Manifest *p){ @@ -190,30 +185,19 @@ #ifdef FOSSIL_DONT_VERIFY_MANIFEST_MD5SUM # define md5sum_init(X) # define md5sum_step_text(X,Y) #endif -/* -** Return true if z points to the first character after a blank line. -** Tolerate either \r\n or \n line endings. -*/ -static int after_blank_line(const char *z){ - if( z[-1]!='\n' ) return 0; - if( z[-2]=='\n' ) return 1; - if( z[-2]=='\r' && z[-3]=='\n' ) return 1; - return 0; -} - /* ** Remove the PGP signature from the artifact, if there is one. 
*/ static void remove_pgp_signature(char **pz, int *pn){ char *z = *pz; int n = *pn; int i; if( memcmp(z, "-----BEGIN PGP SIGNED MESSAGE-----", 34)!=0 ) return; - for(i=34; i<n && !after_blank_line(&z[i]); i++){} + for(i=34; i<n && (z[i-1]!='\n' || z[i-2]!='\n'); i++){} if( i>=n ) return; z += i; n -= i; *pz = z; for(i=n-1; i>=0; i--){ @@ -840,10 +824,11 @@ if( !seenZ ) goto manifest_syntax_error; p->type = CFTYPE_ATTACHMENT; }else{ if( p->nCChild>0 ) goto manifest_syntax_error; if( p->rDate<=0.0 ) goto manifest_syntax_error; + if( p->nParent>0 ) goto manifest_syntax_error; if( p->nField>0 ) goto manifest_syntax_error; if( p->zTicketUuid ) goto manifest_syntax_error; if( p->zWikiTitle ) goto manifest_syntax_error; if( p->zTicketUuid ) goto manifest_syntax_error; p->type = CFTYPE_MANIFEST; @@ -1225,20 +1210,10 @@ if( (pParent->zBaseline==0)==(pChild->zBaseline==0) ){ content_deltify(pid, cid, 0); }else if( pChild->zBaseline==0 && pParent->zBaseline!=0 ){ content_deltify(pParent->pBaseline->rid, cid, 0); } - - /* Remember all children less than 2 seconds younger than their parent, - ** as we might want to fudge the times for those children. - */ - if( pChild->rDate<pParent->rDate+2.3e-5 && manifest_crosslink_busy ){ - db_multi_exec( - "INSERT OR REPLACE INTO time_fudge VALUES(%d, %.17g, %d, %.17g);", - pParent->rid, pParent->rDate, pChild->rid, pChild->rDate - ); - } for(i=0, pChildFile=pChild->aFile; i<pChild->nFile; i++, pChildFile++){ if( pChildFile->zPrior ){ pParentFile = manifest_file_seek(pParent, pChildFile->zPrior); if( pParentFile ){ @@ -1263,74 +1238,44 @@ pChildFile = manifest_file_seek(pChild, pParentFile->zName); if( pChildFile ){ add_one_mlink(cid, 0, pChildFile->zUuid, pChildFile->zName, 0); } } - }else if( pChild->zBaseline==0 ){ - manifest_file_rewind(pParent); - while( (pParentFile = manifest_file_next(pParent,0))!=0 ){ - pChildFile = manifest_file_seek(pChild, pParentFile->zName); - if( pChildFile==0 ){ - add_one_mlink(cid, pParentFile->zUuid, 0, pParentFile->zName, 0); - } - } } manifest_cache_insert(*ppOther); } +/* +** True if manifest_crosslink_begin() has been called but +** manifest_crosslink_end() is still pending. +*/ +static int manifest_crosslink_busy = 0; + /* ** Setup to do multiple manifest_crosslink() calls. ** This is only required if processing ticket changes. */ void manifest_crosslink_begin(void){ assert( manifest_crosslink_busy==0 ); manifest_crosslink_busy = 1; db_begin_transaction(); - db_multi_exec( - "CREATE TEMP TABLE pending_tkt(uuid TEXT UNIQUE);" - "CREATE TEMP TABLE time_fudge(" - " mid INTEGER PRIMARY KEY," - " m1 REAL," - " cid INTEGER," - " m2 REAL" - ");" - ); + db_multi_exec("CREATE TEMP TABLE pending_tkt(uuid TEXT UNIQUE)"); } /* ** Finish up a sequence of manifest_crosslink calls. 
*/ void manifest_crosslink_end(void){ - Stmt q, u; - int i; + Stmt q; assert( manifest_crosslink_busy==1 ); db_prepare(&q, "SELECT uuid FROM pending_tkt"); while( db_step(&q)==SQLITE_ROW ){ const char *zUuid = db_column_text(&q, 0); ticket_rebuild_entry(zUuid); } db_finalize(&q); db_multi_exec("DROP TABLE pending_tkt"); - - db_prepare(&q, "UPDATE time_fudge SET m1=m2-2.8935e-7 WHERE m1>=m2"); - db_prepare(&u, "UPDATE time_fudge SET m2=" - "(SELECT x.m1 FROM time_fudge AS x WHERE x.mid=time_fudge.cid)"); - for(i=0; i<30; i++){ - db_step(&q); - db_reset(&q); - if( sqlite3_changes(g.db)==0 ) break; - db_step(&u); - db_reset(&u); - } - db_finalize(&q); - db_finalize(&u); - db_multi_exec( - "UPDATE event SET mtime=(SELECT m1 FROM time_fudge WHERE mid=objid)" - " WHERE objid IN (SELECT mid FROM time_fudge);" - "DROP TABLE time_fudge;" - ); - db_end_transaction(0); manifest_crosslink_busy = 0; } /* @@ -1471,15 +1416,10 @@ while( db_step(&q)==SQLITE_ROW ){ int cid = db_column_int(&q, 0); add_mlink(rid, p, cid, 0); } db_finalize(&q); - if( p->nParent==0 ){ - for(i=0; inFile; i++){ - add_one_mlink(rid, 0, p->aFile[i].zUuid, p->aFile[i].zName, 0); - } - } db_multi_exec( "REPLACE INTO event(type,mtime,objid,user,comment," "bgcolor,euser,ecomment)" "VALUES('ci'," " coalesce(" Index: src/merge.c ================================================================== --- src/merge.c +++ src/merge.c @@ -73,11 +73,11 @@ } mid = name_to_rid(g.argv[2]); if( mid==0 ){ fossil_fatal("not a version: %s", g.argv[2]); } - if( !is_a_version(mid) ){ + if( mid>1 && !db_exists("SELECT 1 FROM plink WHERE cid=%d", mid) ){ fossil_fatal("not a version: %s", g.argv[2]); } if( pickFlag || backoutFlag ){ pid = db_int(0, "SELECT pid FROM plink WHERE cid=%d AND isprim", mid); if( pid<=0 ){ @@ -96,16 +96,16 @@ pivot_set_secondary(db_column_int(&q,0)); } db_finalize(&q); pid = pivot_find(); if( pid<=0 ){ - fossil_fatal("cannot find a common ancestor between the current " + fossil_fatal("cannot find a common ancestor between the current" "checkout and %s", g.argv[2]); } } - if( !is_a_version(pid) ){ - fossil_fatal("not a version: record #%d", pid); + if( pid>1 && !db_exists("SELECT 1 FROM plink WHERE cid=%d", pid) ){ + fossil_fatal("not a version: record #%d", mid); } vfile_check_signature(vid, 1); db_begin_transaction(); undo_begin(); load_vfile_from_rid(mid); Index: src/merge3.c ================================================================== --- src/merge3.c +++ src/merge3.c @@ -302,11 +302,12 @@ ** from PIVOT->VERSION2 and write the combined changes into MERGED. */ void delta_3waymerge_cmd(void){ Blob pivot, v1, v2, merged; if( g.argc!=6 ){ - usage("PIVOT V1 V2 MERGED"); + fprintf(stderr,"Usage: %s %s PIVOT V1 V2 MERGED\n", g.argv[0], g.argv[1]); + fossil_exit(1); } if( blob_read_from_file(&pivot, g.argv[2])<0 ){ fprintf(stderr,"cannot read %s\n", g.argv[2]); fossil_exit(1); } Index: src/name.c ================================================================== --- src/name.c +++ src/name.c @@ -142,11 +142,10 @@ ** ** Memory to hold the returned string comes from malloc() and needs to ** be freed by the caller. 
*/ char *tag_to_uuid(const char *zTag){ - int vid; char *zUuid = db_text(0, "SELECT blob.uuid" " FROM tag, tagxref, event, blob" " WHERE tag.tagname='sym-'||%Q " @@ -191,24 +190,10 @@ " WHERE event.type='ci'" " AND blob.rid=event.objid" " ORDER BY event.mtime DESC" ); } - if( zUuid==0 && g.localOpen && (vid=db_lget_int("checkout",0))!=0 ){ - if( strcmp(zTag, "current")==0 ){ - zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", vid); - }else if( strcmp(zTag, "prev")==0 || strcmp(zTag, "previous")==0 ){ - zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=" - "(SELECT pid FROM plink WHERE cid=%d AND isprim)", - vid); - }else if( strcmp(zTag, "next")==0 ){ - zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=" - "(SELECT cid FROM plink WHERE pid=%d AND isprim" - " ORDER BY mtime DESC)", - vid); - } - } } return zUuid; } /* Index: src/pivot.c ================================================================== --- src/pivot.c +++ src/pivot.c @@ -50,11 +50,11 @@ ); /* Insert the primary record */ db_multi_exec( "INSERT INTO aqueue(rid, mtime, pending, src)" - " SELECT %d, mtime, 1, 1 FROM event WHERE objid=%d AND type='ci' LIMIT 1", + " SELECT %d, mtime, 1, 1 FROM plink WHERE cid=%d LIMIT 1", rid, rid ); } /* @@ -64,11 +64,11 @@ */ void pivot_set_secondary(int rid){ /* Insert the primary record */ db_multi_exec( "INSERT OR IGNORE INTO aqueue(rid, mtime, pending, src)" - " SELECT %d, mtime, 1, 0 FROM event WHERE objid=%d AND type='ci'", + " SELECT %d, mtime, 1, 0 FROM plink WHERE cid=%d", rid, rid ); } /* Index: src/rebuild.c ================================================================== --- src/rebuild.c +++ src/rebuild.c @@ -352,13 +352,11 @@ */ void rebuild_database(void){ int forceFlag; int randomizeFlag; int errCnt; - int omitVerify; - omitVerify = find_option("noverify",0,0)!=0; forceFlag = find_option("force","f",0)!=0; randomizeFlag = find_option("randomize", 0, 0)!=0; if( g.argc==3 ){ db_open_repository(g.argv[2]); }else{ @@ -375,11 +373,10 @@ if( errCnt && !forceFlag ){ printf("%d errors. Rolling back changes. Use --force to force a commit.\n", errCnt); db_end_transaction(1); }else{ - if( omitVerify ) verify_cancel(); db_end_transaction(0); } } /* @@ -576,11 +573,11 @@ /* ** COMMAND: deconstruct ** ** Usage %fossil deconstruct ?-R|--repository REPOSITORY? ?-L|--prefixlength N? DESTINATION ** -** This command exports all artifacts of a given repository and +** This command exports all artifacts of o given repository and ** writes all artifacts to the file system. The DESTINATION directory ** will be populated with subdirectories AA and files AA/BBBBBBBBB.., where ** AABBBBBBBBB.. is the 40 character artifact ID, AA the first 2 characters. ** If -L|--prefixlength is given, the length (default 2) of the directory ** prefix can be set to 0,1,..,9 characters. Index: src/report.c ================================================================== --- src/report.c +++ src/report.c @@ -239,13 +239,10 @@ sqlite3_set_authorizer(g.db, report_query_authorizer, (void*)&zErr); rc = sqlite3_prepare(g.db, zSql, -1, &pStmt, &zTail); if( rc!=SQLITE_OK ){ zErr = mprintf("Syntax error: %s", sqlite3_errmsg(g.db)); } - if( !sqlite3_stmt_readonly(pStmt) ){ - zErr = mprintf("SQL must not modify the database"); - } if( pStmt ){ sqlite3_finalize(pStmt); } sqlite3_set_authorizer(g.db, 0, 0); return zErr; @@ -818,65 +815,10 @@ @ } @
} -/* -** Execute a single read-only SQL statement. Invoke xCallback() on each -** row. -*/ -int sqlite3_exec_readonly( - sqlite3 *db, /* The database on which the SQL executes */ - const char *zSql, /* The SQL to be executed */ - sqlite3_callback xCallback, /* Invoke this callback routine */ - void *pArg, /* First argument to xCallback() */ - char **pzErrMsg /* Write error messages here */ -){ - int rc = SQLITE_OK; /* Return code */ - const char *zLeftover; /* Tail of unprocessed SQL */ - sqlite3_stmt *pStmt = 0; /* The current SQL statement */ - char **azCols = 0; /* Names of result columns */ - int nCol; /* Number of columns of output */ - char **azVals = 0; /* Text of all output columns */ - int i; /* Loop counter */ - - pStmt = 0; - rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, &zLeftover); - assert( rc==SQLITE_OK || pStmt==0 ); - if( rc!=SQLITE_OK ){ - return rc; - } - if( !pStmt ){ - /* this happens for a comment or white-space */ - return SQLITE_OK; - } - if( !sqlite3_stmt_readonly(pStmt) ){ - sqlite3_finalize(pStmt); - return SQLITE_ERROR; - } - - nCol = sqlite3_column_count(pStmt); - azVals = fossil_malloc(2*nCol*sizeof(const char*) + 1); - while( (rc = sqlite3_step(pStmt))==SQLITE_ROW ){ - if( azCols==0 ){ - azCols = &azVals[nCol]; - for(i=0; i sState.rn = rn; sState.nCount = 0; sqlite3_set_authorizer(g.db, report_query_authorizer, (void*)&zErr1); - sqlite3_exec_readonly(g.db, zSql, generate_html, &sState, &zErr2); + sqlite3_exec(g.db, zSql, generate_html, &sState, &zErr2); sqlite3_set_authorizer(g.db, 0, 0); @ if( zErr1 ){ @

Error: %h(zErr1)

}else if( zErr2 ){ @@ -966,11 +908,11 @@ @

Error: %h(zErr2)

} style_footer(); }else{ sqlite3_set_authorizer(g.db, report_query_authorizer, (void*)&zErr1); - sqlite3_exec_readonly(g.db, zSql, output_tab_separated, &count, &zErr2); + sqlite3_exec(g.db, zSql, output_tab_separated, &count, &zErr2); sqlite3_set_authorizer(g.db, 0, 0); cgi_set_content_type("text/plain"); } } @@ -1128,11 +1070,11 @@ } count = 0; tktEncode = enc; zSep = zSepIn; sqlite3_set_authorizer(g.db, report_query_authorizer, (void*)&zErr1); - sqlite3_exec_readonly(g.db, zSql, output_separated_file, &count, &zErr2); + sqlite3_exec(g.db, zSql, output_separated_file, &count, &zErr2); sqlite3_set_authorizer(g.db, 0, 0); if( zFilter ){ free(zSql); } } Index: src/setup.c ================================================================== --- src/setup.c +++ src/setup.c @@ -694,11 +694,11 @@ } if( iVal ){ @ @ %s(zLabel) }else{ - @ %s(zLabel) + @ %s(zLabel) } } /* ** Generate an entry box for an attribute. @@ -877,11 +877,11 @@ login_insert_csrf_secret(); for(pSet=ctrlSettings; pSet->name!=0; pSet++){ if( pSet->width==0 ){ onoff_attribute(pSet->name, pSet->name, pSet->var!=0 ? pSet->var : pSet->name, - is_truth(pSet->def)); + pSet->def[0]=='1'); @
} } @ for(pSet=ctrlSettings; pSet->name!=0; pSet++){ @@ -1087,11 +1087,11 @@ /* ** WEBPAGE: setup_logo */ void setup_logo(void){ - const char *zMime = db_get("logo-mimetype","image/gif"); + const char *zMime = "image/gif"; const char *aImg = P("im"); int szImg = atoi(PD("im:bytes","0")); if( szImg>0 ){ zMime = PD("im:mimetype","image/gif"); } Index: src/sqlite3.c ================================================================== --- src/sqlite3.c +++ src/sqlite3.c @@ -1,12 +1,12 @@ /****************************************************************************** ** This file is an amalgamation of many separate C source files from SQLite -** version 3.7.4. By combining all the individual C code files into this +** version 3.7.3. By combining all the individual C code files into this ** single large file, the entire code can be compiled as a one translation ** unit. This allows many compilers to do optimizations that would not be ** possible if the files were compiled separately. Performance improvements -** of 5% or more are commonly seen when SQLite is compiled as a single +** of 5% are more are commonly seen when SQLite is compiled as a single ** translation unit. ** ** This file is all you need to compile SQLite. To use SQLite in other ** programs, you need this file and the "sqlite3.h" header file that defines ** the programming interface to the SQLite library. (If you do not have @@ -648,13 +648,13 @@ ** ** See also: [sqlite3_libversion()], ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.7.4" -#define SQLITE_VERSION_NUMBER 3007004 -#define SQLITE_SOURCE_ID "2010-11-16 23:10:26 fd5b2f23dd5111d2f0934dd828bae36b755024c1" +#define SQLITE_VERSION "3.7.3" +#define SQLITE_VERSION_NUMBER 3007003 +#define SQLITE_SOURCE_ID "2010-10-07 13:29:13 e55ada89246d4cc5f476891c70572dc7c1c3643e" /* ** CAPI3REF: Run-Time Library Version Numbers ** KEYWORDS: sqlite3_version, sqlite3_sourceid ** @@ -3174,25 +3174,10 @@ ** SQL text used to create a [prepared statement] if that statement was ** compiled using either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()]. */ SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt); -/* -** CAPI3REF: Determine If An SQL Statement Writes The Database -** -** ^The sqlite3_stmt_readonly(X) interface returns true (non-zero) if -** the [prepared statement] X is guaranteed to leave the database file -** unmodified. ^If the sqlite3_stmt_readonly(X) interface returns false (zero) -** then evaluating the statement might change the database file, but this -** is not guaranteed as the write operation might be conditional and the -** condition might not be met. ^If X is a NULL pointer then -** sqlite3_stmt_readonly(X) returns true. If X is a non-NULL pointer but -** is not a pointer to a valid, unfinalized prepared statement, then the -** behavior is undefined and probably harmful. -*/ -SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt); - /* ** CAPI3REF: Dynamically Typed Value Object ** KEYWORDS: {protected sqlite3_value} {unprotected sqlite3_value} ** ** SQLite uses the sqlite3_value object to represent all values @@ -3288,14 +3273,11 @@ ** ^If the fourth parameter is negative, the length of the string is ** the number of bytes up to the first zero terminator. ** ** ^The fifth argument to sqlite3_bind_blob(), sqlite3_bind_text(), and ** sqlite3_bind_text16() is a destructor used to dispose of the BLOB or -** string after SQLite has finished with it. 
^The destructor is called -** to dispose of the BLOB or string even if the call to sqlite3_bind_blob(), -** sqlite3_bind_text(), or sqlite3_bind_text16() fails. -** ^If the fifth argument is +** string after SQLite has finished with it. ^If the fifth argument is ** the special value [SQLITE_STATIC], then SQLite assumes that the ** information is in static, unmanaged space and does not need to be freed. ** ^If the fifth argument has the value [SQLITE_TRANSIENT], then ** SQLite makes its own private copy of the data immediately, before ** the sqlite3_bind_*() routine returns. @@ -3931,19 +3913,16 @@ ** parameters. ^An aggregate SQL function requires an implementation of xStep ** and xFinal and NULL pointer must be passed for xFunc. ^To delete an existing ** SQL function or aggregate, pass NULL poiners for all three function ** callbacks. ** -** ^(If the tenth parameter to sqlite3_create_function_v2() is not NULL, -** then it is destructor for the application data pointer. -** The destructor is invoked when the function is deleted, either by being -** overloaded or when the database connection closes.)^ -** ^The destructor is also invoked if the call to -** sqlite3_create_function_v2() fails. -** ^When the destructor callback of the tenth parameter is invoked, it -** is passed a single argument which is a copy of the application data -** pointer which was the fifth parameter to sqlite3_create_function_v2(). +** ^If the tenth parameter to sqlite3_create_function_v2() is not NULL, +** then it is invoked when the function is deleted, either by being +** overloaded or when the database connection closes. +** ^When the destructure callback of the tenth parameter is invoked, it +** is passed a single argument which is a copy of the pointer which was +** the fifth parameter to sqlite3_create_function_v2(). ** ** ^It is permitted to register multiple implementations of the same ** functions with the same name but with either differing numbers of ** arguments or differing preferred text encodings. ^SQLite will use ** the implementation that most closely matches the way in which the @@ -4403,19 +4382,10 @@ ** the collating function is deleted. ** ^Collating functions are deleted when they are overridden by later ** calls to the collation creation functions or when the ** [database connection] is closed using [sqlite3_close()]. ** -** ^The xDestroy callback is not called if the -** sqlite3_create_collation_v2() function fails. Applications that invoke -** sqlite3_create_collation_v2() with a non-NULL xDestroy argument should -** check the return code and dispose of the application data pointer -** themselves rather than expecting SQLite to deal with it for them. -** This is different from every other SQLite interface. The inconsistency -** is unfortunate but cannot be changed without breaking backwards -** compatibility. -** ** See also: [sqlite3_collation_needed()] and [sqlite3_collation_needed16()]. */ SQLITE_API int sqlite3_create_collation( sqlite3*, const char *zName, @@ -5166,13 +5136,11 @@ ** when a new virtual table is be being created or reinitialized. ** ** ^The sqlite3_create_module_v2() interface has a fifth parameter which ** is a pointer to a destructor for the pClientData. ^SQLite will ** invoke the destructor function (if it is not NULL) when SQLite -** no longer needs the pClientData pointer. ^The destructor will also -** be invoked if the call to sqlite3_create_module_v2() fails. -** ^The sqlite3_create_module() +** no longer needs the pClientData pointer. 
^The sqlite3_create_module() ** interface is equivalent to sqlite3_create_module_v2() with a NULL ** destructor. */ SQLITE_API int sqlite3_create_module( sqlite3 *db, /* SQLite connection to register module with */ @@ -5351,33 +5319,10 @@ sqlite3_int64 iRow, int flags, sqlite3_blob **ppBlob ); -/* -** CAPI3REF: Move a BLOB Handle to a New Row -** -** ^This function is used to move an existing blob handle so that it points -** to a different row of the same database table. ^The new row is identified -** by the rowid value passed as the second argument. Only the row can be -** changed. ^The database, table and column on which the blob handle is open -** remain the same. Moving an existing blob handle to a new row can be -** faster than closing the existing handle and opening a new one. -** -** ^(The new row must meet the same criteria as for [sqlite3_blob_open()] - -** it must exist and there must be either a blob or text value stored in -** the nominated column.)^ ^If the new row is not present in the table, or if -** it does not contain a blob or text value, or if another error occurs, an -** SQLite error code is returned and the blob handle is considered aborted. -** ^All subsequent calls to [sqlite3_blob_read()], [sqlite3_blob_write()] or -** [sqlite3_blob_reopen()] on an aborted blob handle immediately return -** SQLITE_ABORT. -** -** ^This function sets the database handle error code and message. -*/ -SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64); - /* ** CAPI3REF: Close A BLOB Handle ** ** ^Closes an open [BLOB handle]. ** @@ -9945,13 +9890,10 @@ Expr *pOn; /* The ON clause of a join */ IdList *pUsing; /* The USING clause of a join */ Bitmask colUsed; /* Bit N (1<" clause */ Index *pIndex; /* Index structure corresponding to zIndex, if any */ -#ifndef SQLITE_OMIT_EXPLAIN - int iSelectId; /* If pSelect!=0, the id of the sub-select in EQP */ -#endif } a[1]; /* One entry for each identifier on the list */ }; /* ** Permitted values of the SrcList.a.jointype field @@ -9980,11 +9922,10 @@ ** case that more than one of these conditions is true. */ struct WherePlan { u32 wsFlags; /* WHERE_* flags that describe the strategy */ u32 nEq; /* Number of == constraints */ - double nRow; /* Estimated number of rows (for EQP) */ union { Index *pIdx; /* Index when WHERE_INDEXED is true */ struct WhereTerm *pTerm; /* WHERE clause term for OR-search */ sqlite3_index_info *pVtabIdx; /* Virtual table index to use */ } u; @@ -10065,11 +10006,10 @@ int iContinue; /* Jump here to continue with next record */ int iBreak; /* Jump here to break out of the loop */ int nLevel; /* Number of nested loop */ struct WhereClause *pWC; /* Decomposition of the WHERE clause */ double savedNQueryLoop; /* pParse->nQueryLoop outside the WHERE loop */ - double nRowOut; /* Estimated number of output rows */ WhereLevel a[1]; /* Information about each nest loop in WHERE */ }; /* ** A NameContext defines a context in which to resolve table and column @@ -10141,11 +10081,10 @@ Select *pRightmost; /* Right-most select in a compound select statement */ Expr *pLimit; /* LIMIT expression. NULL means not used. */ Expr *pOffset; /* OFFSET expression. NULL means not used. */ int iLimit, iOffset; /* Memory registers holding LIMIT & OFFSET counters */ int addrOpenEphm[3]; /* OP_OpenEphem opcodes related to this select */ - double nSelectRow; /* Estimated number of result rows */ }; /* ** Allowed values for Select.selFlags. The "SF" prefix stands for ** "Select Flag". 
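Among the 3.7.4 interfaces dropped by this downgrade is sqlite3_blob_reopen(), whose documentation block is deleted earlier in this file's hunks. Code that wants to move an incremental-blob handle to another row on the 3.7.3 core reinstated here has to close the handle and open a new one. The sketch below shows that fallback under the assumption that the caller still knows the database, table, and column names; the helper name blob_seek_row is illustrative and is not part of either the SQLite or the Fossil API.

#include <sqlite3.h>

/*
** Point *ppBlob at a different row of the same table and column by closing
** the old handle and opening a fresh one.  This is the portable substitute
** for sqlite3_blob_reopen() on builds, such as 3.7.3, that do not have it.
*/
static int blob_seek_row(
  sqlite3 *db,            /* Database connection */
  const char *zDb,        /* Symbolic database name, e.g. "main" */
  const char *zTab,       /* Table name */
  const char *zCol,       /* Column name */
  sqlite3_int64 iRow,     /* Rowid to point at next */
  int flags,              /* 0 for read-only, non-zero for read/write */
  sqlite3_blob **ppBlob   /* IN/OUT: blob handle to redirect */
){
  int rc = SQLITE_OK;
  if( *ppBlob ){
    rc = sqlite3_blob_close(*ppBlob);  /* Release the handle on the old row */
    *ppBlob = 0;
  }
  if( rc==SQLITE_OK ){
    rc = sqlite3_blob_open(db, zDb, zTab, zCol, iRow, flags, ppBlob);
  }
  return rc;
}

Compared with the 3.7.4 sqlite3_blob_reopen() call, this re-runs the whole open path for every move, which is exactly the overhead that interface was added to avoid.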
@@ -10337,15 +10276,10 @@ Table **apVtabLock; /* Pointer to virtual tables needing locking */ #endif int nHeight; /* Expression tree height of current sub-select */ Table *pZombieTab; /* List of Table objects to delete after code gen */ TriggerPrg *pTriggerPrg; /* Linked list of coded triggers */ - -#ifndef SQLITE_OMIT_EXPLAIN - int iSelectId; - int iNextSelectId; -#endif }; #ifdef SQLITE_OMIT_VIRTUALTABLE #define IN_DECLARE_VTAB 0 #else @@ -27377,28 +27311,13 @@ if( flags & (SQLITE_OPEN_WAL|SQLITE_OPEN_MAIN_JOURNAL) ){ char zDb[MAX_PATHNAME+1]; /* Database file path */ int nDb; /* Number of valid bytes in zDb */ struct stat sStat; /* Output of stat() on database file */ - /* zPath is a path to a WAL or journal file. The following block derives - ** the path to the associated database file from zPath. This block handles - ** the following naming conventions: - ** - ** "-journal" - ** "-wal" - ** "-journal-NNNN" - ** "-wal-NNNN" - ** - ** where NNNN is a 4 digit decimal number. The NNNN naming schemes are - ** used by the test_multiplex.c module. - */ - nDb = sqlite3Strlen30(zPath) - 1; - while( nDb>0 && zPath[nDb]!='l' ) nDb--; - nDb -= ((flags & SQLITE_OPEN_WAL) ? 3 : 7); + nDb = sqlite3Strlen30(zPath) - ((flags & SQLITE_OPEN_WAL) ? 4 : 8); memcpy(zDb, zPath, nDb); zDb[nDb] = '\0'; - if( 0==stat(zDb, &sStat) ){ *pMode = sStat.st_mode & 0777; }else{ rc = SQLITE_IOERR_FSTAT; } @@ -34600,11 +34519,10 @@ # define sqlite3WalSavepointUndo(y,z) 0 # define sqlite3WalFrames(u,v,w,x,y,z) 0 # define sqlite3WalCheckpoint(u,v,w,x) 0 # define sqlite3WalCallback(z) 0 # define sqlite3WalExclusiveMode(y,z) 0 -# define sqlite3WalHeapMemory(z) 0 #else #define WAL_SAVEPOINT_NDATA 4 /* Connection to a write-ahead log (WAL) file. @@ -34611,11 +34529,11 @@ ** There is one object of this type for each pager. */ typedef struct Wal Wal; /* Open and close a connection to a write-ahead log. */ -SQLITE_PRIVATE int sqlite3WalOpen(sqlite3_vfs*, sqlite3_file*, const char *zName, int, Wal**); +SQLITE_PRIVATE int sqlite3WalOpen(sqlite3_vfs*, sqlite3_file*, const char *zName, Wal**); SQLITE_PRIVATE int sqlite3WalClose(Wal *pWal, int sync_flags, int, u8 *); /* Used by readers to open (lock) and close (unlock) a snapshot. A ** snapshot is like a read-transaction. It is the state of the database ** at an instant in time. sqlite3WalOpenSnapshot gets a read lock and @@ -34668,16 +34586,10 @@ /* Tell the wal layer that an EXCLUSIVE lock has been obtained (or released) ** by the pager layer on the database file. */ SQLITE_PRIVATE int sqlite3WalExclusiveMode(Wal *pWal, int op); -/* Return true if the argument is non-NULL and the WAL module is using -** heap-memory for the wal-index. Otherwise, if the argument is NULL or the -** WAL module is using shared-memory, return false. -*/ -SQLITE_PRIVATE int sqlite3WalHeapMemory(Wal *pWal); - #endif /* ifndef SQLITE_OMIT_WAL */ #endif /* _WAL_H_ */ /************** End of wal.h *************************************************/ /************** Continuing where we left off in pager.c **********************/ @@ -35584,13 +35496,11 @@ break; } return 1; } -#endif /* ifndef NDEBUG */ -#ifdef SQLITE_DEBUG /* ** Return a pointer to a human readable string in a static buffer ** containing the state of the Pager object passed as an argument. This ** is intended to be used within debuggers. For example, as an alternative ** to "print *pPager" in gdb: @@ -35710,11 +35620,11 @@ ** UNKNOWN_LOCK for an explanation of this. 
*/ static int pagerUnlockDb(Pager *pPager, int eLock){ int rc = SQLITE_OK; - assert( !pPager->exclusiveMode || pPager->eLock==eLock ); + assert( !pPager->exclusiveMode ); assert( eLock==NO_LOCK || eLock==SHARED_LOCK ); assert( eLock!=NO_LOCK || pagerUseWal(pPager)==0 ); if( isOpen(pPager->fd) ){ assert( pPager->eLock>=eLock ); rc = sqlite3OsUnlock(pPager->fd, eLock); @@ -39184,11 +39094,11 @@ if( rc==SQLITE_OK ){ if( nPage==0 ){ sqlite3BeginBenignMalloc(); if( pagerLockDb(pPager, RESERVED_LOCK)==SQLITE_OK ){ sqlite3OsDelete(pVfs, pPager->zJournal, 0); - if( !pPager->exclusiveMode ) pagerUnlockDb(pPager, SHARED_LOCK); + pagerUnlockDb(pPager, SHARED_LOCK); } sqlite3EndBenignMalloc(); }else{ /* The journal file exists and no other connection has a reserved ** or greater lock on the database file. Now check that there is @@ -41010,12 +40920,11 @@ assert( eMode==PAGER_LOCKINGMODE_QUERY || eMode==PAGER_LOCKINGMODE_NORMAL || eMode==PAGER_LOCKINGMODE_EXCLUSIVE ); assert( PAGER_LOCKINGMODE_QUERY<0 ); assert( PAGER_LOCKINGMODE_NORMAL>=0 && PAGER_LOCKINGMODE_EXCLUSIVE>=0 ); - assert( pPager->exclusiveMode || 0==sqlite3WalHeapMemory(pPager->pWal) ); - if( eMode>=0 && !pPager->tempFile && !sqlite3WalHeapMemory(pPager->pWal) ){ + if( eMode>=0 && !pPager->tempFile ){ pPager->exclusiveMode = (u8)eMode; } return (int)pPager->exclusiveMode; } @@ -41198,64 +41107,12 @@ ** Return true if the underlying VFS for the given pager supports the ** primitives necessary for write-ahead logging. */ SQLITE_PRIVATE int sqlite3PagerWalSupported(Pager *pPager){ const sqlite3_io_methods *pMethods = pPager->fd->pMethods; - return pPager->exclusiveMode || (pMethods->iVersion>=2 && pMethods->xShmMap); -} - -/* -** Attempt to take an exclusive lock on the database file. If a PENDING lock -** is obtained instead, immediately release it. -*/ -static int pagerExclusiveLock(Pager *pPager){ - int rc; /* Return code */ - - assert( pPager->eLock==SHARED_LOCK || pPager->eLock==EXCLUSIVE_LOCK ); - rc = pagerLockDb(pPager, EXCLUSIVE_LOCK); - if( rc!=SQLITE_OK ){ - /* If the attempt to grab the pending lock failed, release the - ** exclusive lock that may have been obtained instead. */ - pagerUnlockDb(pPager, SHARED_LOCK); - } - - return rc; -} - -/* -** Call sqlite3WalOpen() to open the WAL handle. If the pager is in -** exclusive-locking mode when this function is called, take an EXCLUSIVE -** lock on the database file and use heap-memory to store the wal-index -** in. Otherwise, use the normal shared-memory. -*/ -static int pagerOpenWal(Pager *pPager){ - int rc = SQLITE_OK; - - assert( pPager->pWal==0 && pPager->tempFile==0 ); - assert( pPager->eLock==SHARED_LOCK || pPager->eLock==EXCLUSIVE_LOCK || pPager->noReadlock); - - /* If the pager is already in exclusive-mode, the WAL module will use - ** heap-memory for the wal-index instead of the VFS shared-memory - ** implementation. Take the exclusive lock now, before opening the WAL - ** file, to make sure this is safe. - */ - if( pPager->exclusiveMode ){ - rc = pagerExclusiveLock(pPager); - } - - /* Open the connection to the log file. If this operation fails, - ** (e.g. due to malloc() failure), return an error code. - */ - if( rc==SQLITE_OK ){ - rc = sqlite3WalOpen(pPager->pVfs, - pPager->fd, pPager->zWal, pPager->exclusiveMode, &pPager->pWal - ); - } - - return rc; -} - + return pMethods->iVersion>=2 && pMethods->xShmMap!=0; +} /* ** The caller must be holding a SHARED lock on the database file to call ** this function. 
** @@ -41286,11 +41143,15 @@ if( !sqlite3PagerWalSupported(pPager) ) return SQLITE_CANTOPEN; /* Close any rollback journal previously open */ sqlite3OsClose(pPager->jfd); - rc = pagerOpenWal(pPager); + /* Open the connection to the log file. If this operation fails, + ** (e.g. due to malloc() failure), unlock the database file and + ** return an error code. + */ + rc = sqlite3WalOpen(pPager->pVfs, pPager->fd, pPager->zWal, &pPager->pWal); if( rc==SQLITE_OK ){ pPager->journalMode = PAGER_JOURNALMODE_WAL; pPager->eState = PAGER_OPEN; } }else{ @@ -41325,25 +41186,30 @@ rc = sqlite3OsAccess( pPager->pVfs, pPager->zWal, SQLITE_ACCESS_EXISTS, &logexists ); } if( rc==SQLITE_OK && logexists ){ - rc = pagerOpenWal(pPager); + rc = sqlite3WalOpen(pPager->pVfs, pPager->fd, + pPager->zWal, &pPager->pWal); } } /* Checkpoint and close the log. Because an EXCLUSIVE lock is held on ** the database file, the log and log-summary files will be deleted. */ if( rc==SQLITE_OK && pPager->pWal ){ - rc = pagerExclusiveLock(pPager); + rc = pagerLockDb(pPager, EXCLUSIVE_LOCK); if( rc==SQLITE_OK ){ rc = sqlite3WalClose(pPager->pWal, - (pPager->noSync ? 0 : pPager->sync_flags), - pPager->pageSize, (u8*)pPager->pTmpSpace + (pPager->noSync ? 0 : pPager->sync_flags), + pPager->pageSize, (u8*)pPager->pTmpSpace ); pPager->pWal = 0; + }else{ + /* If we cannot get an EXCLUSIVE lock, downgrade the PENDING lock + ** that we did get back to SHARED. */ + pagerUnlockDb(pPager, SQLITE_LOCK_SHARED); } } return rc; } @@ -41795,17 +41661,10 @@ #ifdef SQLITE_DEBUG u8 lockError; /* True if a locking error has occurred */ #endif }; -/* -** Candidate values for Wal.exclusiveMode. -*/ -#define WAL_NORMAL_MODE 0 -#define WAL_EXCLUSIVE_MODE 1 -#define WAL_HEAPMEMORY_MODE 2 - /* ** Each page of the wal-index mapping contains a hash-table made up of ** an array of HASHTABLE_NSLOT elements of the following type. */ typedef u16 ht_slot; @@ -41888,18 +41747,13 @@ pWal->nWiData = iPage+1; } /* Request a pointer to the required page from the VFS */ if( pWal->apWiData[iPage]==0 ){ - if( pWal->exclusiveMode==WAL_HEAPMEMORY_MODE ){ - pWal->apWiData[iPage] = (u32 volatile *)sqlite3MallocZero(WALINDEX_PGSZ); - if( !pWal->apWiData[iPage] ) rc = SQLITE_NOMEM; - }else{ - rc = sqlite3OsShmMap(pWal->pDbFd, iPage, WALINDEX_PGSZ, - pWal->writeLock, (void volatile **)&pWal->apWiData[iPage] - ); - } + rc = sqlite3OsShmMap(pWal->pDbFd, iPage, WALINDEX_PGSZ, + pWal->writeLock, (void volatile **)&pWal->apWiData[iPage] + ); } *ppPage = pWal->apWiData[iPage]; assert( iPage==0 || *ppPage || rc!=SQLITE_OK ); return rc; @@ -41978,16 +41832,10 @@ aOut[0] = s1; aOut[1] = s2; } -static void walShmBarrier(Wal *pWal){ - if( pWal->exclusiveMode!=WAL_HEAPMEMORY_MODE ){ - sqlite3OsShmBarrier(pWal->pDbFd); - } -} - /* ** Write the header information in pWal->hdr into the wal-index. ** ** The checksum on pWal->hdr is updated before it is written. */ @@ -41998,11 +41846,11 @@ assert( pWal->writeLock ); pWal->hdr.isInit = 1; pWal->hdr.iVersion = WALINDEX_MAX_VERSION; walChecksumBytes(1, (u8*)&pWal->hdr, nCksum, 0, pWal->hdr.aCksum); memcpy((void *)&aHdr[1], (void *)&pWal->hdr, sizeof(WalIndexHdr)); - walShmBarrier(pWal); + sqlite3OsShmBarrier(pWal->pDbFd); memcpy((void *)&aHdr[0], (void *)&pWal->hdr, sizeof(WalIndexHdr)); } /* ** This function encodes a single frame header and writes it to a buffer @@ -42570,19 +42418,11 @@ /* ** Close an open wal-index. 
*/ static void walIndexClose(Wal *pWal, int isDelete){ - if( pWal->exclusiveMode==WAL_HEAPMEMORY_MODE ){ - int i; - for(i=0; inWiData; i++){ - sqlite3_free((void *)pWal->apWiData[i]); - pWal->apWiData[i] = 0; - } - }else{ - sqlite3OsShmUnmap(pWal->pDbFd, isDelete); - } + sqlite3OsShmUnmap(pWal->pDbFd, isDelete); } /* ** Open a connection to the WAL file zWalName. The database file must ** already be opened on connection pDbFd. The buffer that zWalName points @@ -42600,11 +42440,10 @@ */ SQLITE_PRIVATE int sqlite3WalOpen( sqlite3_vfs *pVfs, /* vfs module to open wal and wal-index */ sqlite3_file *pDbFd, /* The open database file */ const char *zWalName, /* Name of the WAL file */ - int bNoShm, /* True to run in heap-memory mode */ Wal **ppWal /* OUT: Allocated Wal handle */ ){ int rc; /* Return Code */ Wal *pRet; /* Object to allocate and return */ int flags; /* Flags passed to OsOpen() */ @@ -42634,11 +42473,10 @@ pRet->pVfs = pVfs; pRet->pWalFd = (sqlite3_file *)&pRet[1]; pRet->pDbFd = pDbFd; pRet->readLock = -1; pRet->zWalName = zWalName; - pRet->exclusiveMode = (bNoShm ? WAL_HEAPMEMORY_MODE: WAL_NORMAL_MODE); /* Open file handle on the write-ahead log file. */ flags = (SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_WAL); rc = sqlite3OsOpen(pVfs, zWalName, pRet->pWalFd, flags, &flags); if( rc==SQLITE_OK && flags&SQLITE_OPEN_READONLY ){ @@ -43068,13 +42906,11 @@ ** ** The EXCLUSIVE lock is not released before returning. */ rc = sqlite3OsLock(pWal->pDbFd, SQLITE_LOCK_EXCLUSIVE); if( rc==SQLITE_OK ){ - if( pWal->exclusiveMode==WAL_NORMAL_MODE ){ - pWal->exclusiveMode = WAL_EXCLUSIVE_MODE; - } + pWal->exclusiveMode = 1; rc = sqlite3WalCheckpoint(pWal, sync_flags, nBuf, zBuf); if( rc==SQLITE_OK ){ isDelete = 1; } } @@ -43126,11 +42962,11 @@ ** Memory barriers are used to prevent the compiler or the hardware from ** reordering the reads and writes. */ aHdr = walIndexHdr(pWal); memcpy(&h1, (void *)&aHdr[0], sizeof(h1)); - walShmBarrier(pWal); + sqlite3OsShmBarrier(pWal->pDbFd); memcpy(&h2, (void *)&aHdr[1], sizeof(h2)); if( memcmp(&h1, &h2, sizeof(h1))!=0 ){ return 1; /* Dirty read */ } @@ -43327,11 +43163,11 @@ if( !useWal && pInfo->nBackfill==pWal->hdr.mxFrame ){ /* The WAL has been completely backfilled (or it is empty). ** and can be safely ignored. */ rc = walLockShared(pWal, WAL_READ_LOCK(0)); - walShmBarrier(pWal); + sqlite3OsShmBarrier(pWal->pDbFd); if( rc==SQLITE_OK ){ if( memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr)) ){ /* It is not safe to allow the reader to continue here if frames ** may have been appended to the log before READ_LOCK(0) was obtained. ** When holding READ_LOCK(0), the reader ignores the entire log file, @@ -43421,11 +43257,11 @@ ** date before proceeding. That would not be possible without somehow ** blocking writers. It only guarantees that a dangerous checkpoint or ** log-wrap (either of which would require an exclusive lock on ** WAL_READ_LOCK(mxI)) has not occurred since the snapshot was valid. */ - walShmBarrier(pWal); + sqlite3OsShmBarrier(pWal->pDbFd); if( pInfo->aReadMark[mxI]!=mxReadMark || memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr)) ){ walUnlockShared(pWal, WAL_READ_LOCK(mxI)); return WAL_RETRY; @@ -44064,18 +43900,17 @@ ** WAL is already in exclusive-locking mode - meaning that this ** routine is a no-op. The pager must already hold the exclusive lock ** on the main database file before invoking this operation. 
** ** If op is negative, then do a dry-run of the op==1 case but do -** not actually change anything. The pager uses this to see if it +** not actually change anything. The pager uses this to see if it ** should acquire the database exclusive lock prior to invoking ** the op==1 case. */ SQLITE_PRIVATE int sqlite3WalExclusiveMode(Wal *pWal, int op){ int rc; assert( pWal->writeLock==0 ); - assert( pWal->exclusiveMode!=WAL_HEAPMEMORY_MODE || op==-1 ); /* pWal->readLock is usually set, but might be -1 if there was a ** prior error while attempting to acquire are read-lock. This cannot ** happen if the connection is actually in exclusive mode (as no xShmLock ** locks are taken in this case). Nor should the pager attempt to @@ -44105,19 +43940,10 @@ rc = pWal->exclusiveMode==0; } return rc; } -/* -** Return true if the argument is non-NULL and the WAL module is using -** heap-memory for the wal-index. Otherwise, if the argument is NULL or the -** WAL module is using shared-memory, return false. -*/ -SQLITE_PRIVATE int sqlite3WalHeapMemory(Wal *pWal){ - return (pWal && pWal->exclusiveMode==WAL_HEAPMEMORY_MODE ); -} - #endif /* #ifndef SQLITE_OMIT_WAL */ /************** End of wal.c *************************************************/ /************** Begin file btmutex.c *****************************************/ /* @@ -48293,21 +48119,20 @@ ** ** This will release the write lock on the database file. If there ** are no active cursors, it also releases the read lock. */ SQLITE_PRIVATE int sqlite3BtreeCommitPhaseTwo(Btree *p){ + BtShared *pBt = p->pBt; - if( p->inTrans==TRANS_NONE ) return SQLITE_OK; sqlite3BtreeEnter(p); btreeIntegrity(p); /* If the handle has a write-transaction open, commit the shared-btrees ** transaction and set the shared state to TRANS_READ. */ if( p->inTrans==TRANS_WRITE ){ int rc; - BtShared *pBt = p->pBt; assert( pBt->inTransaction==TRANS_WRITE ); assert( pBt->nTransaction>0 ); rc = sqlite3PagerCommitPhaseTwo(pBt->pPager); if( rc!=SQLITE_OK ){ sqlite3BtreeLeave(p); @@ -53222,11 +53047,12 @@ ** sqlite3BtreePutData()). */ SQLITE_PRIVATE void sqlite3BtreeCacheOverflow(BtCursor *pCur){ assert( cursorHoldsMutex(pCur) ); assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); - invalidateOverflowCache(pCur); + assert(!pCur->isIncrblobHandle); + assert(!pCur->aOverflow); pCur->isIncrblobHandle = 1; } #endif /* @@ -56255,14 +56081,16 @@ pMem->flags = MEM_Int; pMem->u.i = pOp->p2; /* P2 */ pMem->type = SQLITE_INTEGER; pMem++; - pMem->flags = MEM_Int; - pMem->u.i = pOp->p3; /* P3 */ - pMem->type = SQLITE_INTEGER; - pMem++; + if( p->explain==1 ){ + pMem->flags = MEM_Int; + pMem->u.i = pOp->p3; /* P3 */ + pMem->type = SQLITE_INTEGER; + pMem++; + } if( sqlite3VdbeMemGrow(pMem, 32, 0) ){ /* P4 */ assert( p->db->mallocFailed ); return SQLITE_ERROR; } @@ -56303,11 +56131,11 @@ pMem->flags = MEM_Null; /* Comment */ pMem->type = SQLITE_NULL; } } - p->nResColumn = 8 - 4*(p->explain-1); + p->nResColumn = 8 - 5*(p->explain-1); p->rc = SQLITE_OK; rc = SQLITE_ROW; } return rc; } @@ -59250,12 +59078,10 @@ } sqlite3Error(p->db, rc, 0); rc = sqlite3ApiExit(p->db, rc); } sqlite3_mutex_leave(p->db->mutex); - }else if( xDel!=SQLITE_STATIC && xDel!=SQLITE_TRANSIENT ){ - xDel((void*)zData); } return rc; } @@ -59494,18 +59320,10 @@ */ SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt *pStmt){ return pStmt ? ((Vdbe*)pStmt)->db : 0; } -/* -** Return true if the prepared statement is guaranteed to not modify the -** database. -*/ -SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt){ - return pStmt ? 
((Vdbe*)pStmt)->readOnly : 1; -} - /* ** Return a pointer to the next prepared statement after pStmt associated ** with database connection pDb. If pStmt is NULL, return the first ** prepared statement for the database connection. Return NULL if there ** are no more. @@ -66253,86 +66071,15 @@ typedef struct Incrblob Incrblob; struct Incrblob { int flags; /* Copy of "flags" passed to sqlite3_blob_open() */ int nByte; /* Size of open blob, in bytes */ int iOffset; /* Byte offset of blob in cursor data */ - int iCol; /* Table column this handle is open on */ BtCursor *pCsr; /* Cursor pointing at blob row */ sqlite3_stmt *pStmt; /* Statement holding cursor open */ sqlite3 *db; /* The associated database */ }; - -/* -** This function is used by both blob_open() and blob_reopen(). It seeks -** the b-tree cursor associated with blob handle p to point to row iRow. -** If successful, SQLITE_OK is returned and subsequent calls to -** sqlite3_blob_read() or sqlite3_blob_write() access the specified row. -** -** If an error occurs, or if the specified row does not exist or does not -** contain a value of type TEXT or BLOB in the column nominated when the -** blob handle was opened, then an error code is returned and *pzErr may -** be set to point to a buffer containing an error message. It is the -** responsibility of the caller to free the error message buffer using -** sqlite3DbFree(). -** -** If an error does occur, then the b-tree cursor is closed. All subsequent -** calls to sqlite3_blob_read(), blob_write() or blob_reopen() will -** immediately return SQLITE_ABORT. -*/ -static int blobSeekToRow(Incrblob *p, sqlite3_int64 iRow, char **pzErr){ - int rc; /* Error code */ - char *zErr = 0; /* Error message */ - Vdbe *v = (Vdbe *)p->pStmt; - - /* Set the value of the SQL statements only variable to integer iRow. - ** This is done directly instead of using sqlite3_bind_int64() to avoid - ** triggering asserts related to mutexes. - */ - assert( v->aVar[0].flags&MEM_Int ); - v->aVar[0].u.i = iRow; - - rc = sqlite3_step(p->pStmt); - if( rc==SQLITE_ROW ){ - u32 type = v->apCsr[0]->aType[p->iCol]; - if( type<12 ){ - zErr = sqlite3MPrintf(p->db, "cannot open value of type %s", - type==0?"null": type==7?"real": "integer" - ); - rc = SQLITE_ERROR; - sqlite3_finalize(p->pStmt); - p->pStmt = 0; - }else{ - p->iOffset = v->apCsr[0]->aOffset[p->iCol]; - p->nByte = sqlite3VdbeSerialTypeLen(type); - p->pCsr = v->apCsr[0]->pCursor; - sqlite3BtreeEnterCursor(p->pCsr); - sqlite3BtreeCacheOverflow(p->pCsr); - sqlite3BtreeLeaveCursor(p->pCsr); - } - } - - if( rc==SQLITE_ROW ){ - rc = SQLITE_OK; - }else if( p->pStmt ){ - rc = sqlite3_finalize(p->pStmt); - p->pStmt = 0; - if( rc==SQLITE_OK ){ - zErr = sqlite3MPrintf(p->db, "no such rowid: %lld", iRow); - rc = SQLITE_ERROR; - }else{ - zErr = sqlite3MPrintf(p->db, "%s", sqlite3_errmsg(p->db)); - } - } - - assert( rc!=SQLITE_OK || zErr==0 ); - assert( rc!=SQLITE_ROW && rc!=SQLITE_DONE ); - - *pzErr = zErr; - return rc; -} - /* ** Open a blob handle. */ SQLITE_API int sqlite3_blob_open( sqlite3* db, /* The database connection */ @@ -66369,39 +66116,33 @@ /* One of the following two instructions is replaced by an OP_Noop. 
*/ {OP_OpenRead, 0, 0, 0}, /* 3: Open cursor 0 for reading */ {OP_OpenWrite, 0, 0, 0}, /* 4: Open cursor 0 for read/write */ {OP_Variable, 1, 1, 1}, /* 5: Push the rowid to the stack */ - {OP_NotExists, 0, 10, 1}, /* 6: Seek the cursor */ + {OP_NotExists, 0, 9, 1}, /* 6: Seek the cursor */ {OP_Column, 0, 0, 1}, /* 7 */ {OP_ResultRow, 1, 0, 0}, /* 8 */ - {OP_Goto, 0, 5, 0}, /* 9 */ - {OP_Close, 0, 0, 0}, /* 10 */ - {OP_Halt, 0, 0, 0}, /* 11 */ + {OP_Close, 0, 0, 0}, /* 9 */ + {OP_Halt, 0, 0, 0}, /* 10 */ }; + Vdbe *v = 0; int rc = SQLITE_OK; char *zErr = 0; Table *pTab; - Parse *pParse = 0; - Incrblob *pBlob = 0; + Parse *pParse; - flags = !!flags; /* flags = (flags ? 1 : 0); */ *ppBlob = 0; - sqlite3_mutex_enter(db->mutex); - - pBlob = (Incrblob *)sqlite3DbMallocZero(db, sizeof(Incrblob)); - if( !pBlob ) goto blob_open_out; pParse = sqlite3StackAllocRaw(db, sizeof(*pParse)); - if( !pParse ) goto blob_open_out; - + if( pParse==0 ){ + rc = SQLITE_NOMEM; + goto blob_open_out; + } do { memset(pParse, 0, sizeof(Parse)); pParse->db = db; - sqlite3DbFree(db, zErr); - zErr = 0; sqlite3BtreeEnterAll(db); pTab = sqlite3LocateTable(pParse, 0, zTable, zDb); if( pTab && IsVirtual(pTab) ){ pTab = 0; @@ -66423,11 +66164,11 @@ sqlite3BtreeLeaveAll(db); goto blob_open_out; } /* Now search pTab for the exact column. */ - for(iCol=0; iColnCol; iCol++) { + for(iCol=0; iCol < pTab->nCol; iCol++) { if( sqlite3StrICmp(pTab->aCol[iCol].zName, zColumn)==0 ){ break; } } if( iCol==pTab->nCol ){ @@ -66477,18 +66218,15 @@ sqlite3BtreeLeaveAll(db); goto blob_open_out; } } - pBlob->pStmt = (sqlite3_stmt *)sqlite3VdbeCreate(db); - assert( pBlob->pStmt || db->mallocFailed ); - if( pBlob->pStmt ){ - Vdbe *v = (Vdbe *)pBlob->pStmt; + v = sqlite3VdbeCreate(db); + if( v ){ int iDb = sqlite3SchemaToIndex(db, pTab->pSchema); - sqlite3VdbeAddOpList(v, sizeof(openBlob)/sizeof(VdbeOpList), openBlob); - + flags = !!flags; /* flags = (flags ? 1 : 0); */ /* Configure the OP_Transaction */ sqlite3VdbeChangeP1(v, 0, iDb); sqlite3VdbeChangeP2(v, 0, flags); @@ -66527,29 +66265,69 @@ if( !db->mallocFailed ){ sqlite3VdbeMakeReady(v, 1, 1, 1, 0, 0, 0); } } - pBlob->flags = flags; - pBlob->iCol = iCol; - pBlob->db = db; sqlite3BtreeLeaveAll(db); if( db->mallocFailed ){ goto blob_open_out; } - sqlite3_bind_int64(pBlob->pStmt, 1, iRow); - rc = blobSeekToRow(pBlob, iRow, &zErr); - } while( (++nAttempt)<5 && rc==SQLITE_SCHEMA ); + + sqlite3_bind_int64((sqlite3_stmt *)v, 1, iRow); + rc = sqlite3_step((sqlite3_stmt *)v); + if( rc!=SQLITE_ROW ){ + nAttempt++; + rc = sqlite3_finalize((sqlite3_stmt *)v); + sqlite3DbFree(db, zErr); + zErr = sqlite3MPrintf(db, sqlite3_errmsg(db)); + v = 0; + } + } while( nAttempt<5 && rc==SQLITE_SCHEMA ); + + if( rc==SQLITE_ROW ){ + /* The row-record has been opened successfully. Check that the + ** column in question contains text or a blob. If it contains + ** text, it is up to the caller to get the encoding right. 
+ */ + Incrblob *pBlob; + u32 type = v->apCsr[0]->aType[iCol]; + + if( type<12 ){ + sqlite3DbFree(db, zErr); + zErr = sqlite3MPrintf(db, "cannot open value of type %s", + type==0?"null": type==7?"real": "integer" + ); + rc = SQLITE_ERROR; + goto blob_open_out; + } + pBlob = (Incrblob *)sqlite3DbMallocZero(db, sizeof(Incrblob)); + if( db->mallocFailed ){ + sqlite3DbFree(db, pBlob); + goto blob_open_out; + } + pBlob->flags = flags; + pBlob->pCsr = v->apCsr[0]->pCursor; + sqlite3BtreeEnterCursor(pBlob->pCsr); + sqlite3BtreeCacheOverflow(pBlob->pCsr); + sqlite3BtreeLeaveCursor(pBlob->pCsr); + pBlob->pStmt = (sqlite3_stmt *)v; + pBlob->iOffset = v->apCsr[0]->aOffset[iCol]; + pBlob->nByte = sqlite3VdbeSerialTypeLen(type); + pBlob->db = db; + *ppBlob = (sqlite3_blob *)pBlob; + rc = SQLITE_OK; + }else if( rc==SQLITE_OK ){ + sqlite3DbFree(db, zErr); + zErr = sqlite3MPrintf(db, "no such rowid: %lld", iRow); + rc = SQLITE_ERROR; + } blob_open_out: - if( rc==SQLITE_OK && db->mallocFailed==0 ){ - *ppBlob = (sqlite3_blob *)pBlob; - }else{ - if( pBlob && pBlob->pStmt ) sqlite3VdbeFinalize((Vdbe *)pBlob->pStmt); - sqlite3DbFree(db, pBlob); + if( v && (rc!=SQLITE_OK || db->mallocFailed) ){ + sqlite3VdbeFinalize(v); } - sqlite3Error(db, rc, (zErr ? "%s" : 0), zErr); + sqlite3Error(db, rc, zErr); sqlite3DbFree(db, zErr); sqlite3StackFree(db, pParse); rc = sqlite3ApiExit(db, rc); sqlite3_mutex_leave(db->mutex); return rc; @@ -66598,11 +66376,11 @@ if( n<0 || iOffset<0 || (iOffset+n)>p->nByte ){ /* Request is out of range. Return a transient error. */ rc = SQLITE_ERROR; sqlite3Error(db, SQLITE_ERROR, 0); - }else if( v==0 ){ + } else if( v==0 ){ /* If there is no statement handle, then the blob-handle has ** already been invalidated. Return SQLITE_ABORT in this case. */ rc = SQLITE_ABORT; }else{ @@ -66649,49 +66427,10 @@ SQLITE_API int sqlite3_blob_bytes(sqlite3_blob *pBlob){ Incrblob *p = (Incrblob *)pBlob; return p ? p->nByte : 0; } -/* -** Move an existing blob handle to point to a different row of the same -** database table. -** -** If an error occurs, or if the specified row does not exist or does not -** contain a blob or text value, then an error code is returned and the -** database handle error code and message set. If this happens, then all -** subsequent calls to sqlite3_blob_xxx() functions (except blob_close()) -** immediately return SQLITE_ABORT. -*/ -SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *pBlob, sqlite3_int64 iRow){ - int rc; - Incrblob *p = (Incrblob *)pBlob; - sqlite3 *db; - - if( p==0 ) return SQLITE_MISUSE_BKPT; - db = p->db; - sqlite3_mutex_enter(db->mutex); - - if( p->pStmt==0 ){ - /* If there is no statement handle, then the blob-handle has - ** already been invalidated. Return SQLITE_ABORT in this case. - */ - rc = SQLITE_ABORT; - }else{ - char *zErr; - rc = blobSeekToRow(p, iRow, &zErr); - if( rc!=SQLITE_OK ){ - sqlite3Error(db, rc, (zErr ? 
"%s" : 0), zErr); - sqlite3DbFree(db, zErr); - } - assert( rc!=SQLITE_SCHEMA ); - } - - rc = sqlite3ApiExit(db, rc); - sqlite3_mutex_leave(db->mutex); - return rc; -} - #endif /* #ifndef SQLITE_OMIT_INCRBLOB */ /************** End of vdbeblob.c ********************************************/ /************** Begin file journal.c *****************************************/ /* @@ -69018,13 +68757,10 @@ Expr *pRight, /* Right operand */ const Token *pToken /* Argument token */ ){ Expr *p = sqlite3ExprAlloc(pParse->db, op, pToken, 1); sqlite3ExprAttachSubtrees(pParse->db, p, pLeft, pRight); - if( p ) { - sqlite3ExprCheckHeight(pParse, p->nHeight); - } return p; } /* ** Join two expressions using an AND operator. If either expression is @@ -70134,20 +69870,10 @@ sqlite3VdbeAddOp1(v, OP_If, mem); testAddr = sqlite3VdbeAddOp2(v, OP_Integer, 1, mem); assert( testAddr>0 || pParse->db->mallocFailed ); } -#ifndef SQLITE_OMIT_EXPLAIN - if( pParse->explain==2 ){ - char *zMsg = sqlite3MPrintf( - pParse->db, "EXECUTE %s%s SUBQUERY %d", testAddr?"":"CORRELATED ", - pExpr->op==TK_IN?"LIST":"SCALAR", pParse->iNextSelectId - ); - sqlite3VdbeAddOp4(v, OP_Explain, pParse->iSelectId, 0, 0, zMsg, P4_DYNAMIC); - } -#endif - switch( pExpr->op ){ case TK_IN: { char affinity; /* Affinity of the LHS of the IN */ KeyInfo keyInfo; /* Keyinfo for the generated table */ int addr; /* Address of OP_OpenEphemeral instruction */ @@ -84330,31 +84056,10 @@ int (*extended_result_codes)(sqlite3*,int); int (*limit)(sqlite3*,int,int); sqlite3_stmt *(*next_stmt)(sqlite3*,sqlite3_stmt*); const char *(*sql)(sqlite3_stmt*); int (*status)(int,int*,int*,int); - int (*backup_finish)(sqlite3_backup*); - sqlite3_backup *(*backup_init)(sqlite3*,const char*,sqlite3*,const char*); - int (*backup_pagecount)(sqlite3_backup*); - int (*backup_remaining)(sqlite3_backup*); - int (*backup_step)(sqlite3_backup*,int); - const char *(*compileoption_get)(int); - int (*compileoption_used)(const char*); - int (*create_function_v2)(sqlite3*,const char*,int,int,void*,void (*xFunc)(sqlite3_context*,int,sqlite3_value**),void (*xStep)(sqlite3_context*,int,sqlite3_value**),void (*xFinal)(sqlite3_context*),void(*xDestroy)(void*)); - int (*db_config)(sqlite3*,int,...); - sqlite3_mutex *(*db_mutex)(sqlite3*); - int (*db_status)(sqlite3*,int,int*,int*,int); - int (*extended_errcode)(sqlite3*); - void (*log)(int,const char*,...); - sqlite3_int64 (*soft_heap_limit64)(sqlite3_int64); - const char *(*sourceid)(void); - int (*stmt_status)(sqlite3_stmt*,int,int); - int (*strnicmp)(const char*,const char*,int); - int (*unlock_notify)(sqlite3*,void(*)(void**,int),void*); - int (*wal_autocheckpoint)(sqlite3*,int); - int (*wal_checkpoint)(sqlite3*,const char*); - void *(*wal_hook)(sqlite3*,int(*)(void*,sqlite3*,const char*,int),void*); }; /* ** The following macros redefine the API routines so that they are ** redirected throught the global sqlite3_api structure. 
@@ -84530,31 +84235,10 @@ #define sqlite3_extended_result_codes sqlite3_api->extended_result_codes #define sqlite3_limit sqlite3_api->limit #define sqlite3_next_stmt sqlite3_api->next_stmt #define sqlite3_sql sqlite3_api->sql #define sqlite3_status sqlite3_api->status -#define sqlite3_backup_finish sqlite3_api->backup_finish -#define sqlite3_backup_init sqlite3_api->backup_init -#define sqlite3_backup_pagecount sqlite3_api->backup_pagecount -#define sqlite3_backup_remaining sqlite3_api->backup_remaining -#define sqlite3_backup_step sqlite3_api->backup_step -#define sqlite3_compileoption_get sqlite3_api->compileoption_get -#define sqlite3_compileoption_used sqlite3_api->compileoption_used -#define sqlite3_create_function_v2 sqlite3_api->create_function_v2 -#define sqlite3_db_config sqlite3_api->db_config -#define sqlite3_db_mutex sqlite3_api->db_mutex -#define sqlite3_db_status sqlite3_api->db_status -#define sqlite3_extended_errcode sqlite3_api->extended_errcode -#define sqlite3_log sqlite3_api->log -#define sqlite3_soft_heap_limit64 sqlite3_api->soft_heap_limit64 -#define sqlite3_sourceid sqlite3_api->sourceid -#define sqlite3_stmt_status sqlite3_api->stmt_status -#define sqlite3_strnicmp sqlite3_api->strnicmp -#define sqlite3_unlock_notify sqlite3_api->unlock_notify -#define sqlite3_wal_autocheckpoint sqlite3_api->wal_autocheckpoint -#define sqlite3_wal_checkpoint sqlite3_api->wal_checkpoint -#define sqlite3_wal_hook sqlite3_api->wal_hook #endif /* SQLITE_CORE */ #define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api = 0; #define SQLITE_EXTENSION_INIT2(v) sqlite3_api = v; @@ -84868,50 +84552,10 @@ sqlite3_extended_result_codes, sqlite3_limit, sqlite3_next_stmt, sqlite3_sql, sqlite3_status, - - /* - ** Added for 3.7.4 - */ - sqlite3_backup_finish, - sqlite3_backup_init, - sqlite3_backup_pagecount, - sqlite3_backup_remaining, - sqlite3_backup_step, -#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS - sqlite3_compileoption_get, - sqlite3_compileoption_used, -#else - 0, - 0, -#endif - sqlite3_create_function_v2, - sqlite3_db_config, - sqlite3_db_mutex, - sqlite3_db_status, - sqlite3_extended_errcode, - sqlite3_log, - sqlite3_soft_heap_limit64, - sqlite3_sourceid, - sqlite3_stmt_status, - sqlite3_strnicmp, -#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY - sqlite3_unlock_notify, -#else - 0, -#endif -#ifndef SQLITE_OMIT_WAL - sqlite3_wal_autocheckpoint, - sqlite3_wal_checkpoint, - sqlite3_wal_hook, -#else - 0, - 0, - 0, -#endif }; /* ** Attempt to load an SQLite extension library contained in the file ** zFile. The entry point is zProc. zProc may be 0 in which case a @@ -87332,17 +86976,17 @@ #ifndef SQLITE_OMIT_EXPLAIN if( rc==SQLITE_OK && pParse->pVdbe && pParse->explain ){ static const char * const azColName[] = { "addr", "opcode", "p1", "p2", "p3", "p4", "p5", "comment", - "selectid", "order", "from", "detail" + "order", "from", "detail" }; int iFirst, mx; if( pParse->explain==2 ){ - sqlite3VdbeSetNumCols(pParse->pVdbe, 4); + sqlite3VdbeSetNumCols(pParse->pVdbe, 3); iFirst = 8; - mx = 12; + mx = 11; }else{ sqlite3VdbeSetNumCols(pParse->pVdbe, 8); iFirst = 0; mx = 8; } @@ -88337,92 +87981,10 @@ } } return pInfo; } -#ifndef SQLITE_OMIT_COMPOUND_SELECT -/* -** Name of the connection operator, used for error messages. 
-*/ -static const char *selectOpName(int id){ - char *z; - switch( id ){ - case TK_ALL: z = "UNION ALL"; break; - case TK_INTERSECT: z = "INTERSECT"; break; - case TK_EXCEPT: z = "EXCEPT"; break; - default: z = "UNION"; break; - } - return z; -} -#endif /* SQLITE_OMIT_COMPOUND_SELECT */ - -#ifndef SQLITE_OMIT_EXPLAIN -/* -** Unless an "EXPLAIN QUERY PLAN" command is being processed, this function -** is a no-op. Otherwise, it adds a single row of output to the EQP result, -** where the caption is of the form: -** -** "USE TEMP B-TREE FOR xxx" -** -** where xxx is one of "DISTINCT", "ORDER BY" or "GROUP BY". Exactly which -** is determined by the zUsage argument. -*/ -static void explainTempTable(Parse *pParse, const char *zUsage){ - if( pParse->explain==2 ){ - Vdbe *v = pParse->pVdbe; - char *zMsg = sqlite3MPrintf(pParse->db, "USE TEMP B-TREE FOR %s", zUsage); - sqlite3VdbeAddOp4(v, OP_Explain, pParse->iSelectId, 0, 0, zMsg, P4_DYNAMIC); - } -} - -/* -** Unless an "EXPLAIN QUERY PLAN" command is being processed, this function -** is a no-op. Otherwise, it adds a single row of output to the EQP result, -** where the caption is of one of the two forms: -** -** "COMPOSITE SUBQUERIES iSub1 and iSub2 (op)" -** "COMPOSITE SUBQUERIES iSub1 and iSub2 USING TEMP B-TREE (op)" -** -** where iSub1 and iSub2 are the integers passed as the corresponding -** function parameters, and op is the text representation of the parameter -** of the same name. The parameter "op" must be one of TK_UNION, TK_EXCEPT, -** TK_INTERSECT or TK_ALL. The first form is used if argument bUseTmp is -** false, or the second form if it is true. -*/ -static void explainComposite( - Parse *pParse, /* Parse context */ - int op, /* One of TK_UNION, TK_EXCEPT etc. */ - int iSub1, /* Subquery id 1 */ - int iSub2, /* Subquery id 2 */ - int bUseTmp /* True if a temp table was used */ -){ - assert( op==TK_UNION || op==TK_EXCEPT || op==TK_INTERSECT || op==TK_ALL ); - if( pParse->explain==2 ){ - Vdbe *v = pParse->pVdbe; - char *zMsg = sqlite3MPrintf( - pParse->db, "COMPOUND SUBQUERIES %d AND %d %s(%s)", iSub1, iSub2, - bUseTmp?"USING TEMP B-TREE ":"", selectOpName(op) - ); - sqlite3VdbeAddOp4(v, OP_Explain, pParse->iSelectId, 0, 0, zMsg, P4_DYNAMIC); - } -} - -/* -** Assign expression b to lvalue a. A second, no-op, version of this macro -** is provided when SQLITE_OMIT_EXPLAIN is defined. This allows the code -** in sqlite3Select() to assign values to structure member variables that -** only exist if SQLITE_OMIT_EXPLAIN is not defined without polluting the -** code with #ifndef directives. -*/ -# define explainSetInteger(a, b) a = b - -#else -/* No-op versions of the explainXXX() functions and macros. */ -# define explainTempTable(y,z) -# define explainComposite(v,w,x,y,z) -# define explainSetInteger(y,z) -#endif /* ** If the inner loop was generated using a non-null pOrderBy argument, ** then the results were placed in a sorter. After the loop is terminated ** we need to run the sorter and output the results. The following @@ -88766,10 +88328,26 @@ } } generateColumnTypes(pParse, pTabList, pEList); } +#ifndef SQLITE_OMIT_COMPOUND_SELECT +/* +** Name of the connection operator, used for error messages. 
+*/ +static const char *selectOpName(int id){ + char *z; + switch( id ){ + case TK_ALL: z = "UNION ALL"; break; + case TK_INTERSECT: z = "INTERSECT"; break; + case TK_EXCEPT: z = "EXCEPT"; break; + default: z = "UNION"; break; + } + return z; +} +#endif /* SQLITE_OMIT_COMPOUND_SELECT */ + /* ** Given a an expression list (which is really the list of expressions ** that form the result set of a SELECT statement) compute appropriate ** column names for a table that would hold the expression list. ** @@ -88928,11 +88506,10 @@ /* The sqlite3ResultSetOfSelect() is only used n contexts where lookaside ** is disabled */ assert( db->lookaside.bEnabled==0 ); pTab->nRef = 1; pTab->zName = 0; - pTab->nRowEst = 1000000; selectColumnsFromExprList(pParse, pSelect->pEList, &pTab->nCol, &pTab->aCol); selectAddColumnTypeAndCollation(pParse, pTab->nCol, pTab->aCol, pSelect); pTab->iPKey = -1; if( db->mallocFailed ){ sqlite3DeleteTable(db, pTab); @@ -88999,12 +88576,10 @@ if( sqlite3ExprIsInteger(p->pLimit, &n) ){ sqlite3VdbeAddOp2(v, OP_Integer, n, iLimit); VdbeComment((v, "LIMIT counter")); if( n==0 ){ sqlite3VdbeAddOp2(v, OP_Goto, 0, iBreak); - }else{ - if( p->nSelectRow > (double)n ) p->nSelectRow = (double)n; } }else{ sqlite3ExprCode(pParse, p->pLimit, iLimit); sqlite3VdbeAddOp1(v, OP_MustBeInt, iLimit); VdbeComment((v, "LIMIT counter")); @@ -89101,14 +88676,10 @@ Select *pPrior; /* Another SELECT immediately to our left */ Vdbe *v; /* Generate code to this VDBE */ SelectDest dest; /* Alternative data destination */ Select *pDelete = 0; /* Chain of simple selects to delete */ sqlite3 *db; /* Database connection */ -#ifndef SQLITE_OMIT_EXPLAIN - int iSub1; /* EQP id of left-hand query */ - int iSub2; /* EQP id of right-hand query */ -#endif /* Make sure there is no ORDER BY or LIMIT clause on prior SELECTs. Only ** the last (right-most) SELECT in the series may have an ORDER BY or LIMIT. */ assert( p && p->pPrior ); /* Calling function guarantees this much */ @@ -89162,15 +88733,13 @@ /* Generate code for the left and right SELECT statements. */ switch( p->op ){ case TK_ALL: { int addr = 0; - int nLimit; assert( !pPrior->pLimit ); pPrior->pLimit = p->pLimit; pPrior->pOffset = p->pOffset; - explainSetInteger(iSub1, pParse->iNextSelectId); rc = sqlite3Select(pParse, pPrior, &dest); p->pLimit = 0; p->pOffset = 0; if( rc ){ goto multi_select_end; @@ -89180,22 +88749,14 @@ p->iOffset = pPrior->iOffset; if( p->iLimit ){ addr = sqlite3VdbeAddOp1(v, OP_IfZero, p->iLimit); VdbeComment((v, "Jump ahead if LIMIT reached")); } - explainSetInteger(iSub2, pParse->iNextSelectId); rc = sqlite3Select(pParse, p, &dest); testcase( rc!=SQLITE_OK ); pDelete = p->pPrior; p->pPrior = pPrior; - p->nSelectRow += pPrior->nSelectRow; - if( pPrior->pLimit - && sqlite3ExprIsInteger(pPrior->pLimit, &nLimit) - && p->nSelectRow > (double)nLimit - ){ - p->nSelectRow = (double)nLimit; - } if( addr ){ sqlite3VdbeJumpHere(v, addr); } break; } @@ -89235,11 +88796,10 @@ /* Code the SELECT statements to our left */ assert( !pPrior->pOrderBy ); sqlite3SelectDestInit(&uniondest, priorOp, unionTab); - explainSetInteger(iSub1, pParse->iNextSelectId); rc = sqlite3Select(pParse, pPrior, &uniondest); if( rc ){ goto multi_select_end; } @@ -89255,20 +88815,18 @@ pLimit = p->pLimit; p->pLimit = 0; pOffset = p->pOffset; p->pOffset = 0; uniondest.eDest = op; - explainSetInteger(iSub2, pParse->iNextSelectId); rc = sqlite3Select(pParse, p, &uniondest); testcase( rc!=SQLITE_OK ); /* Query flattening in sqlite3Select() might refill p->pOrderBy. 
** Be sure to delete p->pOrderBy, therefore, to avoid a memory leak. */ sqlite3ExprListDelete(db, p->pOrderBy); pDelete = p->pPrior; p->pPrior = pPrior; p->pOrderBy = 0; - if( p->op==TK_UNION ) p->nSelectRow += pPrior->nSelectRow; sqlite3ExprDelete(db, p->pLimit); p->pLimit = pLimit; p->pOffset = pOffset; p->iLimit = 0; p->iOffset = 0; @@ -89322,11 +88880,10 @@ assert( p->pEList ); /* Code the SELECTs to our left into temporary table "tab1". */ sqlite3SelectDestInit(&intersectdest, SRT_Union, tab1); - explainSetInteger(iSub1, pParse->iNextSelectId); rc = sqlite3Select(pParse, pPrior, &intersectdest); if( rc ){ goto multi_select_end; } @@ -89339,16 +88896,14 @@ pLimit = p->pLimit; p->pLimit = 0; pOffset = p->pOffset; p->pOffset = 0; intersectdest.iParm = tab2; - explainSetInteger(iSub2, pParse->iNextSelectId); rc = sqlite3Select(pParse, p, &intersectdest); testcase( rc!=SQLITE_OK ); pDelete = p->pPrior; p->pPrior = pPrior; - if( p->nSelectRow>pPrior->nSelectRow ) p->nSelectRow = pPrior->nSelectRow; sqlite3ExprDelete(db, p->pLimit); p->pLimit = pLimit; p->pOffset = pOffset; /* Generate code to take the intersection of the two temporary @@ -89377,12 +88932,10 @@ sqlite3VdbeAddOp2(v, OP_Close, tab1, 0); break; } } - explainComposite(pParse, p->op, iSub1, iSub2, p->op!=TK_ALL); - /* Compute collating sequences used by ** temporary tables needed to implement the compound select. ** Attach the KeyInfo structure to all temporary tables. ** ** This section is run by the right-most SELECT statement only. @@ -89722,14 +89275,10 @@ KeyInfo *pKeyMerge; /* Comparison information for merging rows */ sqlite3 *db; /* Database connection */ ExprList *pOrderBy; /* The ORDER BY clause */ int nOrderBy; /* Number of terms in the ORDER BY clause */ int *aPermute; /* Mapping from ORDER BY terms to result set columns */ -#ifndef SQLITE_OMIT_EXPLAIN - int iSub1; /* EQP id of left-hand query */ - int iSub2; /* EQP id of right-hand query */ -#endif assert( p->pOrderBy!=0 ); assert( pKeyDup==0 ); /* "Managed" code needs this. Ticket #3382. */ db = pParse->db; v = pParse->pVdbe; @@ -89879,11 +89428,10 @@ /* Generate a coroutine to evaluate the SELECT statement to the ** left of the compound operator - the "A" select. */ VdbeNoopComment((v, "Begin coroutine for left SELECT")); pPrior->iLimit = regLimitA; - explainSetInteger(iSub1, pParse->iNextSelectId); sqlite3Select(pParse, pPrior, &destA); sqlite3VdbeAddOp2(v, OP_Integer, 1, regEofA); sqlite3VdbeAddOp1(v, OP_Yield, regAddrA); VdbeNoopComment((v, "End coroutine for left SELECT")); @@ -89894,11 +89442,10 @@ VdbeNoopComment((v, "Begin coroutine for right SELECT")); savedLimit = p->iLimit; savedOffset = p->iOffset; p->iLimit = regLimitB; p->iOffset = 0; - explainSetInteger(iSub2, pParse->iNextSelectId); sqlite3Select(pParse, p, &destB); p->iLimit = savedLimit; p->iOffset = savedOffset; sqlite3VdbeAddOp2(v, OP_Integer, 1, regEofB); sqlite3VdbeAddOp1(v, OP_Yield, regAddrB); @@ -89931,19 +89478,17 @@ }else{ addrEofA = sqlite3VdbeAddOp2(v, OP_If, regEofB, labelEnd); sqlite3VdbeAddOp2(v, OP_Gosub, regOutB, addrOutB); sqlite3VdbeAddOp1(v, OP_Yield, regAddrB); sqlite3VdbeAddOp2(v, OP_Goto, 0, addrEofA); - p->nSelectRow += pPrior->nSelectRow; } /* Generate a subroutine to run when the results from select B ** are exhausted and only data in select A remains. 
*/ if( op==TK_INTERSECT ){ addrEofB = addrEofA; - if( p->nSelectRow > pPrior->nSelectRow ) p->nSelectRow = pPrior->nSelectRow; }else{ VdbeNoopComment((v, "eof-B subroutine")); addrEofB = sqlite3VdbeAddOp2(v, OP_If, regEofA, labelEnd); sqlite3VdbeAddOp2(v, OP_Gosub, regOutA, addrOutA); sqlite3VdbeAddOp1(v, OP_Yield, regAddrA); @@ -90027,11 +89572,10 @@ } p->pPrior = pPrior; /*** TBD: Insert subroutine calls to close cursors on incomplete **** subqueries ****/ - explainComposite(pParse, p->op, iSub1, iSub2, 0); return SQLITE_OK; } #endif #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) @@ -90761,11 +90305,10 @@ pTab->nRef = 1; pTab->zName = sqlite3MPrintf(db, "sqlite_subquery_%p_", (void*)pTab); while( pSel->pPrior ){ pSel = pSel->pPrior; } selectColumnsFromExprList(pParse, pSel->pEList, &pTab->nCol, &pTab->aCol); pTab->iPKey = -1; - pTab->nRowEst = 1000000; pTab->tabFlags |= TF_Ephemeral; #endif }else{ /* An ordinary table or view name in the FROM clause */ assert( pFrom->pTab==0 ); @@ -91255,15 +90798,10 @@ int addrSortIndex; /* Address of an OP_OpenEphemeral instruction */ AggInfo sAggInfo; /* Information used by aggregate queries */ int iEnd; /* Address of the end of the query */ sqlite3 *db; /* The database connection */ -#ifndef SQLITE_OMIT_EXPLAIN - int iRestoreSelectId = pParse->iSelectId; - pParse->iSelectId = pParse->iNextSelectId++; -#endif - db = pParse->db; if( p==0 || db->mallocFailed || pParse->nErr ){ return 1; } if( sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0) ) return 1; @@ -91331,14 +90869,12 @@ } i = -1; }else{ sqlite3SelectDestInit(&dest, SRT_EphemTab, pItem->iCursor); assert( pItem->isPopulated==0 ); - explainSetInteger(pItem->iSelectId, pParse->iNextSelectId); sqlite3Select(pParse, pSub, &dest); pItem->isPopulated = 1; - pItem->pTab->nRowEst = (unsigned)pSub->nSelectRow; } if( /*pParse->nErr ||*/ db->mallocFailed ){ goto select_end; } pParse->nHeight -= sqlite3SelectExprHeight(p); @@ -91368,16 +90904,14 @@ pRight = pLoop; } mxSelect = db->aLimit[SQLITE_LIMIT_COMPOUND_SELECT]; if( mxSelect && cnt>mxSelect ){ sqlite3ErrorMsg(pParse, "too many terms in compound SELECT"); - goto select_end; + return 1; } } - rc = multiSelect(pParse, p, pDest); - explainSetInteger(pParse->iSelectId, iRestoreSelectId); - return rc; + return multiSelect(pParse, p, pDest); } #endif /* If possible, rewrite the query to use GROUP BY instead of DISTINCT. ** GROUP BY might use an index, DISTINCT never does. @@ -91385,10 +90919,11 @@ assert( p->pGroupBy==0 || (p->selFlags & SF_Aggregate)!=0 ); if( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct ){ p->pGroupBy = sqlite3ExprListDup(db, p->pEList, 0); pGroupBy = p->pGroupBy; p->selFlags &= ~SF_Distinct; + isDistinct = 0; } /* If there is both a GROUP BY and an ORDER BY clause and they are ** identical, then disable the ORDER BY clause since the GROUP BY ** will cause elements to come out in the correct order. This is @@ -91427,16 +90962,15 @@ } /* Set the limiter. */ iEnd = sqlite3VdbeMakeLabel(v); - p->nSelectRow = (double)LARGEST_INT64; computeLimitRegisters(pParse, p, iEnd); /* Open a virtual index to use for the distinct set. 
*/ - if( p->selFlags & SF_Distinct ){ + if( isDistinct ){ KeyInfo *pKeyInfo; assert( isAgg || pGroupBy ); distinct = pParse->nTab++; pKeyInfo = keyInfoFromExprList(pParse, p->pEList); sqlite3VdbeAddOp4(v, OP_OpenEphemeral, distinct, 0, 0, @@ -91451,11 +90985,10 @@ /* This case is for non-aggregate queries ** Begin the database scan */ pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, &pOrderBy, 0); if( pWInfo==0 ) goto select_end; - if( pWInfo->nRowOut < p->nSelectRow ) p->nSelectRow = pWInfo->nRowOut; /* If sorting index that was created by a prior OP_OpenEphemeral ** instruction ended up not being needed, then change the OP_OpenEphemeral ** into an OP_Noop. */ @@ -91496,13 +91029,10 @@ pItem->iAlias = 0; } for(k=pGroupBy->nExpr, pItem=pGroupBy->a; k>0; k--, pItem++){ pItem->iAlias = 0; } - if( p->nSelectRow>(double)100 ) p->nSelectRow = (double)100; - }else{ - p->nSelectRow = (double)1; } /* Create a label to jump to when we want to abort the query */ addrEnd = sqlite3VdbeMakeLabel(v); @@ -91595,13 +91125,10 @@ int regBase; int regRecord; int nCol; int nGroupBy; - explainTempTable(pParse, - isDistinct && !(p->selFlags&SF_Distinct)?"DISTINCT":"GROUP BY"); - groupBySort = 1; nGroupBy = pGroupBy->nExpr; nCol = nGroupBy + 1; j = nGroupBy+1; for(i=0; i=0 ){ - explainTempTable(pParse, "DISTINCT"); - } - /* If there is an ORDER BY clause, then we need to sort the results ** and send them to the callback one by one. */ if( pOrderBy ){ - explainTempTable(pParse, "ORDER BY"); generateSortTail(pParse, p, v, pEList->nExpr, pDest); } /* Jump here to skip this query */ @@ -91884,11 +91406,10 @@ /* Control jumps to here if an error is encountered above, or upon ** successful coding of the SELECT. */ select_end: - explainSetInteger(pParse->iSelectId, iRestoreSelectId); /* Identify column names if results of the SELECT are to be output. */ if( rc==SQLITE_OK && pDest->eDest==SRT_Output ){ generateColumnNames(pParse, pTabList, pEList); @@ -94974,11 +94495,11 @@ pParse->pNewTable->nCol = 0; pParse->pNewTable->aCol = 0; } db->pVTab = 0; }else{ - sqlite3Error(db, SQLITE_ERROR, (zErr ? "%s" : 0), zErr); + sqlite3Error(db, SQLITE_ERROR, zErr); sqlite3DbFree(db, zErr); rc = SQLITE_ERROR; } pParse->declareVtab = 0; @@ -95436,10 +94957,11 @@ ** cost of pursuing that strategy. */ struct WhereCost { WherePlan plan; /* The lookup strategy */ double rCost; /* Overall cost of pursuing this search strategy */ + double nRow; /* Estimated number of output rows */ Bitmask used; /* Bitmask of cursors used by this plan */ }; /* ** Bitmasks for the operators that indices are able to exploit. An @@ -95478,15 +95000,14 @@ #define WHERE_COLUMN_EQ 0x00010000 /* x=EXPR or x IN (...) or x IS NULL */ #define WHERE_COLUMN_RANGE 0x00020000 /* xEXPR */ #define WHERE_COLUMN_IN 0x00040000 /* x IN (...) 
*/ #define WHERE_COLUMN_NULL 0x00080000 /* x IS NULL */ #define WHERE_INDEXED 0x000f0000 /* Anything that uses an index */ -#define WHERE_NOT_FULLSCAN 0x100f3000 /* Does not do a full table scan */ +#define WHERE_NOT_FULLSCAN 0x000f3000 /* Does not do a full table scan */ #define WHERE_IN_ABLE 0x000f1000 /* Able to support an IN operator */ #define WHERE_TOP_LIMIT 0x00100000 /* xEXPR or x>=EXPR constraint */ -#define WHERE_BOTH_LIMIT 0x00300000 /* Both x>EXPR and xiCursor; /* The cursor of the table to be accessed */ const Bitmask maskSrc = getMask(pWC->pMaskSet, iCur); /* Bitmask for pSrc */ WhereTerm * const pWCEnd = &pWC->a[pWC->nTerm]; /* End of pWC->a[] */ WhereTerm *pTerm; /* A single term of the WHERE clause */ - /* No OR-clause optimization allowed if the INDEXED BY or NOT INDEXED clauses - ** are used */ - if( pSrc->notIndexed || pSrc->pIndex!=0 ){ + /* No OR-clause optimization allowed if the NOT INDEXED clause is used */ + if( pSrc->notIndexed ){ return; } /* Search the WHERE clause terms for a usable WO_OR term. */ for(pTerm=pWC->a; pTerm=pCost->rCost ) break; } /* If there is an ORDER BY clause, increase the scan cost to account @@ -96884,12 +96404,12 @@ ** less than the current cost stored in pCost, replace the contents ** of pCost. */ WHERETRACE(("... multi-index OR cost=%.9g nrow=%.9g\n", rTotal, nRow)); if( rTotalrCost ){ pCost->rCost = rTotal; + pCost->nRow = nRow; pCost->used = used; - pCost->plan.nRow = nRow; pCost->plan.wsFlags = flags; pCost->plan.u.pTerm = pTerm; } } } @@ -96969,11 +96489,11 @@ for(pTerm=pWC->a; pTermrCost, costTempIdx)); pCost->rCost = costTempIdx; - pCost->plan.nRow = logN + 1; + pCost->nRow = logN + 1; pCost->plan.wsFlags = WHERE_TEMP_INDEX; pCost->used = pTerm->prereqRight; break; } } @@ -98042,15 +97562,15 @@ /* If this index is the best we have seen so far, then record this ** index and its cost in the pCost structure. */ if( (!pIdx || wsFlags) - && (costrCost || (cost<=pCost->rCost && nRowplan.nRow)) + && (costrCost || (cost<=pCost->rCost && nRownRow)) ){ pCost->rCost = cost; + pCost->nRow = nRow; pCost->used = used; - pCost->plan.nRow = nRow; pCost->plan.wsFlags = (wsFlags&wsFlagMask); pCost->plan.nEq = nEq; pCost->plan.u.pIdx = pIdx; } @@ -98374,165 +97894,10 @@ } } *pzAff = zAff; return regBase; } - -#ifndef SQLITE_OMIT_EXPLAIN -/* -** This routine is a helper for explainIndexRange() below -** -** pStr holds the text of an expression that we are building up one term -** at a time. This routine adds a new term to the end of the expression. -** Terms are separated by AND so add the "AND" text for second and subsequent -** terms only. -*/ -static void explainAppendTerm( - StrAccum *pStr, /* The text expression being built */ - int iTerm, /* Index of this term. First is zero */ - const char *zColumn, /* Name of the column */ - const char *zOp /* Name of the operator */ -){ - if( iTerm ) sqlite3StrAccumAppend(pStr, " AND ", 5); - sqlite3StrAccumAppend(pStr, zColumn, -1); - sqlite3StrAccumAppend(pStr, zOp, 1); - sqlite3StrAccumAppend(pStr, "?", 1); -} - -/* -** Argument pLevel describes a strategy for scanning table pTab. This -** function returns a pointer to a string buffer containing a description -** of the subset of table rows scanned by the strategy in the form of an -** SQL expression. Or, if all rows are scanned, NULL is returned. -** -** For example, if the query: -** -** SELECT * FROM t1 WHERE a=1 AND b>2; -** -** is run and there is an index on (a, b), then this function returns a -** string similar to: -** -** "a=? AND b>?" 
-** -** The returned pointer points to memory obtained from sqlite3DbMalloc(). -** It is the responsibility of the caller to free the buffer when it is -** no longer required. -*/ -static char *explainIndexRange(sqlite3 *db, WhereLevel *pLevel, Table *pTab){ - WherePlan *pPlan = &pLevel->plan; - Index *pIndex = pPlan->u.pIdx; - int nEq = pPlan->nEq; - int i, j; - Column *aCol = pTab->aCol; - int *aiColumn = pIndex->aiColumn; - StrAccum txt; - - if( nEq==0 && (pPlan->wsFlags & (WHERE_BTM_LIMIT|WHERE_TOP_LIMIT))==0 ){ - return 0; - } - sqlite3StrAccumInit(&txt, 0, 0, SQLITE_MAX_LENGTH); - txt.db = db; - sqlite3StrAccumAppend(&txt, " (", 2); - for(i=0; i"); - } - if( pPlan->wsFlags&WHERE_TOP_LIMIT ){ - explainAppendTerm(&txt, i, aCol[aiColumn[j]].zName, "<"); - } - sqlite3StrAccumAppend(&txt, ")", 1); - return sqlite3StrAccumFinish(&txt); -} - -/* -** This function is a no-op unless currently processing an EXPLAIN QUERY PLAN -** command. If the query being compiled is an EXPLAIN QUERY PLAN, a single -** record is added to the output to describe the table scan strategy in -** pLevel. -*/ -static void explainOneScan( - Parse *pParse, /* Parse context */ - SrcList *pTabList, /* Table list this loop refers to */ - WhereLevel *pLevel, /* Scan to write OP_Explain opcode for */ - int iLevel, /* Value for "level" column of output */ - int iFrom, /* Value for "from" column of output */ - u16 wctrlFlags /* Flags passed to sqlite3WhereBegin() */ -){ - if( pParse->explain==2 ){ - u32 flags = pLevel->plan.wsFlags; - struct SrcList_item *pItem = &pTabList->a[pLevel->iFrom]; - Vdbe *v = pParse->pVdbe; /* VM being constructed */ - sqlite3 *db = pParse->db; /* Database handle */ - char *zMsg; /* Text to add to EQP output */ - sqlite3_int64 nRow; /* Expected number of rows visited by scan */ - int iId = pParse->iSelectId; /* Select id (left-most output column) */ - int isSearch; /* True for a SEARCH. False for SCAN. */ - - if( (flags&WHERE_MULTI_OR) || (wctrlFlags&WHERE_ONETABLE_ONLY) ) return; - - isSearch = (pLevel->plan.nEq>0) - || (flags&(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT))!=0 - || (wctrlFlags&(WHERE_ORDERBY_MIN|WHERE_ORDERBY_MAX)); - - zMsg = sqlite3MPrintf(db, "%s", isSearch?"SEARCH":"SCAN"); - if( pItem->pSelect ){ - zMsg = sqlite3MAppendf(db, zMsg, "%s SUBQUERY %d", zMsg,pItem->iSelectId); - }else{ - zMsg = sqlite3MAppendf(db, zMsg, "%s TABLE %s", zMsg, pItem->zName); - } - - if( pItem->zAlias ){ - zMsg = sqlite3MAppendf(db, zMsg, "%s AS %s", zMsg, pItem->zAlias); - } - if( (flags & WHERE_INDEXED)!=0 ){ - char *zWhere = explainIndexRange(db, pLevel, pItem->pTab); - zMsg = sqlite3MAppendf(db, zMsg, "%s USING %s%sINDEX%s%s%s", zMsg, - ((flags & WHERE_TEMP_INDEX)?"AUTOMATIC ":""), - ((flags & WHERE_IDX_ONLY)?"COVERING ":""), - ((flags & WHERE_TEMP_INDEX)?"":" "), - ((flags & WHERE_TEMP_INDEX)?"": pLevel->plan.u.pIdx->zName), - zWhere - ); - sqlite3DbFree(db, zWhere); - }else if( flags & (WHERE_ROWID_EQ|WHERE_ROWID_RANGE) ){ - zMsg = sqlite3MAppendf(db, zMsg, "%s USING INTEGER PRIMARY KEY", zMsg); - - if( flags&WHERE_ROWID_EQ ){ - zMsg = sqlite3MAppendf(db, zMsg, "%s (rowid=?)", zMsg); - }else if( (flags&WHERE_BOTH_LIMIT)==WHERE_BOTH_LIMIT ){ - zMsg = sqlite3MAppendf(db, zMsg, "%s (rowid>? 
AND rowid?)", zMsg); - }else if( flags&WHERE_TOP_LIMIT ){ - zMsg = sqlite3MAppendf(db, zMsg, "%s (rowidplan.u.pVtabIdx; - zMsg = sqlite3MAppendf(db, zMsg, "%s VIRTUAL TABLE INDEX %d:%s", zMsg, - pVtabIdx->idxNum, pVtabIdx->idxStr); - } -#endif - if( wctrlFlags&(WHERE_ORDERBY_MIN|WHERE_ORDERBY_MAX) ){ - testcase( wctrlFlags & WHERE_ORDERBY_MIN ); - nRow = 1; - }else{ - nRow = (sqlite3_int64)pLevel->plan.nRow; - } - zMsg = sqlite3MAppendf(db, zMsg, "%s (~%lld rows)", zMsg, nRow); - sqlite3VdbeAddOp4(v, OP_Explain, iId, iLevel, iFrom, zMsg, P4_DYNAMIC); - } -} -#else -# define explainOneScan(u,v,w,x,y,z) -#endif /* SQLITE_OMIT_EXPLAIN */ - /* ** Generate code for the start of the iLevel-th loop in the WHERE clause ** implementation described by pWInfo. */ @@ -98937,11 +98302,11 @@ ** If it is, jump to the next iteration of the loop. */ r1 = sqlite3GetTempReg(pParse); testcase( pLevel->plan.wsFlags & WHERE_BTM_LIMIT ); testcase( pLevel->plan.wsFlags & WHERE_TOP_LIMIT ); - if( (pLevel->plan.wsFlags & (WHERE_BTM_LIMIT|WHERE_TOP_LIMIT))!=0 ){ + if( pLevel->plan.wsFlags & (WHERE_BTM_LIMIT|WHERE_TOP_LIMIT) ){ sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, nEq, r1); sqlite3VdbeAddOp2(v, OP_IsNull, r1, addrCont); } sqlite3ReleaseTempReg(pParse, r1); @@ -99071,13 +98436,10 @@ /* Loop through table entries that match term pOrTerm. */ pSubWInfo = sqlite3WhereBegin(pParse, pOrTab, pOrTerm->pExpr, 0, WHERE_OMIT_OPEN | WHERE_OMIT_CLOSE | WHERE_FORCE_TABLE | WHERE_ONETABLE_ONLY); if( pSubWInfo ){ - explainOneScan( - pParse, pOrTab, &pSubWInfo->a[0], iLevel, pLevel->iFrom, 0 - ); if( (wctrlFlags & WHERE_DUPLICATES_OK)==0 ){ int iSet = ((ii==pOrWc->nTerm-1)?-1:ii); int r; r = sqlite3ExprCodeGetColumn(pParse, pTabItem->pTab, -1, iCur, regRowid); @@ -99469,11 +98831,10 @@ int nUnconstrained; /* Number tables without INDEXED BY */ Bitmask notIndexed; /* Mask of tables that cannot use an index */ memset(&bestPlan, 0, sizeof(bestPlan)); bestPlan.rCost = SQLITE_BIG_DBL; - WHERETRACE(("*** Begin search for loop %d ***\n", i)); /* Loop through the remaining entries in the FROM clause to find the ** next nested loop. The loop tests all FROM clause entries ** either once or twice. ** @@ -99534,12 +98895,10 @@ } mask = (isOptimal ? 
m : notReady); pOrderBy = ((i==0 && ppOrderBy )?*ppOrderBy:0); if( pTabItem->pIndex==0 ) nUnconstrained++; - WHERETRACE(("=== trying table %d with isOptimal=%d ===\n", - j, isOptimal)); assert( pTabItem->pTab ); #ifndef SQLITE_OMIT_VIRTUALTABLE if( IsVirtual(pTabItem->pTab) ){ sqlite3_index_info **pp = &pWInfo->a[j].pIdxInfo; bestVirtualIndex(pParse, pWC, pTabItem, mask, notReady, pOrderBy, @@ -99586,27 +98945,24 @@ && (bestJ<0 || (notIndexed&m)!=0 /* (2) */ || (sCost.plan.wsFlags & WHERE_NOT_FULLSCAN)!=0) && (nUnconstrained==0 || pTabItem->pIndex==0 /* (3) */ || NEVER((sCost.plan.wsFlags & WHERE_NOT_FULLSCAN)!=0)) && (bestJ<0 || sCost.rCost=0 ); assert( notReady & getMask(pMaskSet, pTabList->a[bestJ].iCursor) ); - WHERETRACE(("*** Optimizer selects table %d for loop %d" - " with cost=%g and nRow=%g\n", - bestJ, pLevel-pWInfo->a, bestPlan.rCost, bestPlan.plan.nRow)); + WHERETRACE(("*** Optimizer selects table %d for loop %d\n", bestJ, + pLevel-pWInfo->a)); if( (bestPlan.plan.wsFlags & WHERE_ORDERBY)!=0 ){ *ppOrderBy = 0; } andFlags &= bestPlan.plan.wsFlags; pLevel->plan = bestPlan.plan; @@ -99617,13 +98973,11 @@ }else{ pLevel->iIdxCur = -1; } notReady &= ~getMask(pMaskSet, pTabList->a[bestJ].iCursor); pLevel->iFrom = (u8)bestJ; - if( bestPlan.plan.nRow>=(double)1 ){ - pParse->nQueryLoop *= bestPlan.plan.nRow; - } + if( bestPlan.nRow>=(double)1 ) pParse->nQueryLoop *= bestPlan.nRow; /* Check that if the table scanned by this loop iteration had an ** INDEXED BY clause attached to it, that the named index is being ** used for the scan. If not, then query compilation has failed. ** Return an error. @@ -99667,19 +99021,48 @@ /* Open all tables in the pTabList and any indices selected for ** searching those tables. */ sqlite3CodeVerifySchema(pParse, -1); /* Insert the cookie verifier Goto */ notReady = ~(Bitmask)0; - pWInfo->nRowOut = (double)1; for(i=0, pLevel=pWInfo->a; iexplain==2 ){ + char *zMsg; + struct SrcList_item *pItem = &pTabList->a[pLevel->iFrom]; + zMsg = sqlite3MPrintf(db, "TABLE %s", pItem->zName); + if( pItem->zAlias ){ + zMsg = sqlite3MAppendf(db, zMsg, "%s AS %s", zMsg, pItem->zAlias); + } + if( (pLevel->plan.wsFlags & WHERE_TEMP_INDEX)!=0 ){ + zMsg = sqlite3MAppendf(db, zMsg, "%s WITH AUTOMATIC INDEX", zMsg); + }else if( (pLevel->plan.wsFlags & WHERE_INDEXED)!=0 ){ + zMsg = sqlite3MAppendf(db, zMsg, "%s WITH INDEX %s", + zMsg, pLevel->plan.u.pIdx->zName); + }else if( pLevel->plan.wsFlags & WHERE_MULTI_OR ){ + zMsg = sqlite3MAppendf(db, zMsg, "%s VIA MULTI-INDEX UNION", zMsg); + }else if( pLevel->plan.wsFlags & (WHERE_ROWID_EQ|WHERE_ROWID_RANGE) ){ + zMsg = sqlite3MAppendf(db, zMsg, "%s USING PRIMARY KEY", zMsg); + } +#ifndef SQLITE_OMIT_VIRTUALTABLE + else if( (pLevel->plan.wsFlags & WHERE_VIRTUALTABLE)!=0 ){ + sqlite3_index_info *pVtabIdx = pLevel->plan.u.pVtabIdx; + zMsg = sqlite3MAppendf(db, zMsg, "%s VIRTUAL TABLE INDEX %d:%s", zMsg, + pVtabIdx->idxNum, pVtabIdx->idxStr); + } +#endif + if( pLevel->plan.wsFlags & WHERE_ORDERBY ){ + zMsg = sqlite3MAppendf(db, zMsg, "%s ORDER BY", zMsg); + } + sqlite3VdbeAddOp4(v, OP_Explain, i, pLevel->iFrom, 0, zMsg, P4_DYNAMIC); + } +#endif /* SQLITE_OMIT_EXPLAIN */ pTabItem = &pTabList->a[pLevel->iFrom]; pTab = pTabItem->pTab; pLevel->iTabCur = pTabItem->iCursor; - pWInfo->nRowOut *= pLevel->plan.nRow; iDb = sqlite3SchemaToIndex(db, pTab->pSchema); if( (pTab->tabFlags & TF_Ephemeral)!=0 || pTab->pSelect ){ /* Do nothing */ }else #ifndef SQLITE_OMIT_VIRTUALTABLE @@ -99731,14 +99114,12 @@ ** loop below generates code for a single nested 
loop of the VM ** program. */ notReady = ~(Bitmask)0; for(i=0; ia[i]; - explainOneScan(pParse, pTabList, pLevel, i, pLevel->iFrom, wctrlFlags); notReady = codeOneLoopStart(pWInfo, i, wctrlFlags, notReady); - pWInfo->iContinue = pLevel->addrCont; + pWInfo->iContinue = pWInfo->a[i].addrCont; } #ifdef SQLITE_TEST /* For testing and debugging use only */ /* Record in the query plan information about the current table ** and the index used to access it (if any). If the table itself @@ -106136,16 +105517,17 @@ } } } pColl = sqlite3FindCollSeq(db, (u8)enc2, zName, 1); - if( pColl==0 ) return SQLITE_NOMEM; - pColl->xCmp = xCompare; - pColl->pUser = pCtx; - pColl->xDel = xDel; - pColl->enc = (u8)(enc2 | (enc & SQLITE_UTF16_ALIGNED)); - pColl->type = collType; + if( pColl ){ + pColl->xCmp = xCompare; + pColl->pUser = pCtx; + pColl->xDel = xDel; + pColl->enc = (u8)(enc2 | (enc & SQLITE_UTF16_ALIGNED)); + pColl->type = collType; + } sqlite3Error(db, SQLITE_OK, 0); return SQLITE_OK; } @@ -108109,18 +107491,12 @@ #ifndef SQLITE_AMALGAMATION /* ** Macros indicating that conditional expressions are always true or ** false. */ -#ifdef SQLITE_COVERAGE_TEST -# define ALWAYS(x) (1) -# define NEVER(X) (0) -#else # define ALWAYS(x) (x) # define NEVER(X) (x) -#endif - /* ** Internal types used by SQLite. */ typedef unsigned char u8; /* 1-byte (or larger) unsigned integer */ typedef short int i16; /* 2-byte (or larger) signed integer */ @@ -108134,16 +107510,12 @@ typedef struct Fts3Table Fts3Table; typedef struct Fts3Cursor Fts3Cursor; typedef struct Fts3Expr Fts3Expr; typedef struct Fts3Phrase Fts3Phrase; -typedef struct Fts3PhraseToken Fts3PhraseToken; - -typedef struct Fts3SegFilter Fts3SegFilter; -typedef struct Fts3DeferredToken Fts3DeferredToken; typedef struct Fts3SegReader Fts3SegReader; -typedef struct Fts3SegReaderArray Fts3SegReaderArray; +typedef struct Fts3SegFilter Fts3SegFilter; /* ** A connection to a fulltext index is an instance of the following ** structure. The xCreate and xConnect methods create an instance ** of this structure and xDestroy and xDisconnect free that instance. @@ -108160,18 +107532,26 @@ sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */ /* Precompiled statements used by the implementation. Each of these ** statements is run and reset within a single virtual table API call. */ - sqlite3_stmt *aStmt[24]; + sqlite3_stmt *aStmt[25]; + + /* Pointer to string containing the SQL: + ** + ** "SELECT block FROM %_segments WHERE blockid BETWEEN ? AND ? + ** ORDER BY blockid" + */ + char *zSelectLeaves; + int nLeavesStmt; /* Valid statements in aLeavesStmt */ + int nLeavesTotal; /* Total number of prepared leaves stmts */ + int nLeavesAlloc; /* Allocated size of aLeavesStmt */ + sqlite3_stmt **aLeavesStmt; /* Array of prepared zSelectLeaves stmts */ int nNodeSize; /* Soft limit for node size */ - u8 bHasStat; /* True if %_stat table exists */ + u8 bHasContent; /* True if %_content table exists */ u8 bHasDocsize; /* True if %_docsize table exists */ - int nPgsz; /* Page size for host database */ - char *zSegmentsTbl; /* Name of %_segments table */ - sqlite3_blob *pSegments; /* Blob handle open on %_segments table */ /* The following hash table is used to buffer pending index updates during ** transactions. Variable nPendingData estimates the memory size of the ** pending data, including hash table overhead, but not malloc overhead. 
** When nPendingData exceeds nMaxPendingData, the buffer is flushed @@ -108194,25 +107574,18 @@ i16 eSearch; /* Search strategy (see below) */ u8 isEof; /* True if at End Of Results */ u8 isRequireSeek; /* True if must seek pStmt to %_content row */ sqlite3_stmt *pStmt; /* Prepared statement in use by the cursor */ Fts3Expr *pExpr; /* Parsed MATCH query string */ - Fts3DeferredToken *pDeferred; /* Deferred search tokens, if any */ sqlite3_int64 iPrevId; /* Previous id read from aDoclist */ char *pNextId; /* Pointer into the body of aDoclist */ char *aDoclist; /* List of docids for full-text queries */ int nDoclist; /* Size of buffer at aDoclist */ int isMatchinfoNeeded; /* True when aMatchinfo[] needs filling in */ u32 *aMatchinfo; /* Information about most recent match */ - int eEvalmode; /* An FTS3_EVAL_XX constant */ - int nRowAvg; /* Average size of database rows, in pages */ }; -#define FTS3_EVAL_FILTER 0 -#define FTS3_EVAL_NEXT 1 -#define FTS3_EVAL_MATCHINFO 2 - /* ** The Fts3Cursor.eSearch member is always set to one of the following. ** Actualy, Fts3Cursor.eSearch can be greater than or equal to ** FTS3_FULLTEXT_SEARCH. If so, then Fts3Cursor.eSearch - 2 is the index ** of the column to be searched. For example, in @@ -108231,34 +107604,22 @@ #define FTS3_FULLTEXT_SEARCH 2 /* Full-text index search */ /* ** A "phrase" is a sequence of one or more tokens that must match in ** sequence. A single token is the base case and the most common case. -** For a sequence of tokens contained in double-quotes (i.e. "one two three") -** nToken will be the number of tokens in the string. -** -** The nDocMatch and nMatch variables contain data that may be used by the -** matchinfo() function. They are populated when the full-text index is -** queried for hits on the phrase. If one or more tokens in the phrase -** are deferred, the nDocMatch and nMatch variables are populated based -** on the assumption that the +** For a sequence of tokens contained in "...", nToken will be the number +** of tokens in the string. */ -struct Fts3PhraseToken { - char *z; /* Text of the token */ - int n; /* Number of bytes in buffer z */ - int isPrefix; /* True if token ends with a "*" character */ - int bFulltext; /* True if full-text index was used */ - Fts3SegReaderArray *pArray; /* Segment-reader for this token */ - Fts3DeferredToken *pDeferred; /* Deferred token object for this token */ -}; - struct Fts3Phrase { - /* Variables populated by fts3_expr.c when parsing a MATCH expression */ int nToken; /* Number of tokens in the phrase */ int iColumn; /* Index of column this phrase must match */ int isNot; /* Phrase prefixed by unary not (-) operator */ - Fts3PhraseToken aToken[1]; /* One entry for each token in the phrase */ + struct PhraseToken { + char *z; /* Text of the token */ + int n; /* Number of bytes in buffer pointed to by z */ + int isPrefix; /* True if token ends in with a "*" character */ + } aToken[1]; /* One entry for each token in the phrase */ }; /* ** A tree of these objects forms the RHS of a MATCH operator. 
** @@ -108304,10 +107665,15 @@ #define FTSQUERY_AND 3 #define FTSQUERY_OR 4 #define FTSQUERY_PHRASE 5 +/* fts3_init.c */ +SQLITE_PRIVATE int sqlite3Fts3DeleteVtab(int, sqlite3_vtab *); +SQLITE_PRIVATE int sqlite3Fts3InitVtab(int, sqlite3*, void*, int, const char*const*, + sqlite3_vtab **, char **); + /* fts3_write.c */ SQLITE_PRIVATE int sqlite3Fts3UpdateMethod(sqlite3_vtab*,int,sqlite3_value**,sqlite3_int64*); SQLITE_PRIVATE int sqlite3Fts3PendingTermsFlush(Fts3Table *); SQLITE_PRIVATE void sqlite3Fts3PendingTermsClear(Fts3Table *); SQLITE_PRIVATE int sqlite3Fts3Optimize(Fts3Table *); @@ -108317,24 +107683,15 @@ SQLITE_PRIVATE void sqlite3Fts3SegReaderFree(Fts3Table *, Fts3SegReader *); SQLITE_PRIVATE int sqlite3Fts3SegReaderIterate( Fts3Table *, Fts3SegReader **, int, Fts3SegFilter *, int (*)(Fts3Table *, void *, char *, int, char *, int), void * ); -SQLITE_PRIVATE int sqlite3Fts3SegReaderCost(Fts3Cursor *, Fts3SegReader *, int *); +SQLITE_PRIVATE int sqlite3Fts3ReadBlock(Fts3Table*, sqlite3_int64, char const**, int*); SQLITE_PRIVATE int sqlite3Fts3AllSegdirs(Fts3Table*, sqlite3_stmt **); SQLITE_PRIVATE int sqlite3Fts3MatchinfoDocsizeLocal(Fts3Cursor*, u32*); SQLITE_PRIVATE int sqlite3Fts3MatchinfoDocsizeGlobal(Fts3Cursor*, u32*); SQLITE_PRIVATE int sqlite3Fts3ReadLock(Fts3Table *); -SQLITE_PRIVATE int sqlite3Fts3ReadBlock(Fts3Table*, sqlite3_int64, char **, int*); - -SQLITE_PRIVATE void sqlite3Fts3FreeDeferredTokens(Fts3Cursor *); -SQLITE_PRIVATE int sqlite3Fts3DeferToken(Fts3Cursor *, Fts3PhraseToken *, int); -SQLITE_PRIVATE int sqlite3Fts3CacheDeferredDoclists(Fts3Cursor *); -SQLITE_PRIVATE void sqlite3Fts3FreeDeferredDoclists(Fts3Cursor *); -SQLITE_PRIVATE char *sqlite3Fts3DeferredDoclist(Fts3DeferredToken *, int *); - -SQLITE_PRIVATE void sqlite3Fts3SegmentsClose(Fts3Table *); /* Flags allowed as part of the 4th argument to SegmentReaderIterate() */ #define FTS3_SEGMENT_REQUIRE_POS 0x00000001 #define FTS3_SEGMENT_IGNORE_EMPTY 0x00000002 #define FTS3_SEGMENT_COLUMN_FILTER 0x00000004 @@ -108354,21 +107711,19 @@ SQLITE_PRIVATE int sqlite3Fts3GetVarint32(const char *, int *); SQLITE_PRIVATE int sqlite3Fts3VarintLen(sqlite3_uint64); SQLITE_PRIVATE void sqlite3Fts3Dequote(char *); SQLITE_PRIVATE char *sqlite3Fts3FindPositions(Fts3Expr *, sqlite3_int64, int); -SQLITE_PRIVATE int sqlite3Fts3ExprLoadDoclist(Fts3Cursor *, Fts3Expr *); -SQLITE_PRIVATE int sqlite3Fts3ExprLoadFtDoclist(Fts3Cursor *, Fts3Expr *, char **, int *); +SQLITE_PRIVATE int sqlite3Fts3ExprLoadDoclist(Fts3Table *, Fts3Expr *); SQLITE_PRIVATE int sqlite3Fts3ExprNearTrim(Fts3Expr *, Fts3Expr *, int); /* fts3_tokenizer.c */ SQLITE_PRIVATE const char *sqlite3Fts3NextToken(const char *, int *); SQLITE_PRIVATE int sqlite3Fts3InitHashTable(sqlite3 *, Fts3Hash *, const char *); -SQLITE_PRIVATE int sqlite3Fts3InitTokenizer(Fts3Hash *pHash, const char *, - sqlite3_tokenizer **, char ** +SQLITE_PRIVATE int sqlite3Fts3InitTokenizer(Fts3Hash *pHash, + const char *, sqlite3_tokenizer **, const char **, char ** ); -SQLITE_PRIVATE int sqlite3Fts3IsIdChar(char); /* fts3_snippet.c */ SQLITE_PRIVATE void sqlite3Fts3Offsets(sqlite3_context*, Fts3Cursor*); SQLITE_PRIVATE void sqlite3Fts3Snippet(sqlite3_context *, Fts3Cursor *, const char *, const char *, const char *, int, int @@ -108520,17 +107875,20 @@ static int fts3DisconnectMethod(sqlite3_vtab *pVtab){ Fts3Table *p = (Fts3Table *)pVtab; int i; assert( p->nPendingData==0 ); - assert( p->pSegments==0 ); /* Free any prepared statements held */ for(i=0; iaStmt); i++){ 
sqlite3_finalize(p->aStmt[i]); } - sqlite3_free(p->zSegmentsTbl); + for(i=0; inLeavesStmt; i++){ + sqlite3_finalize(p->aLeavesStmt[i]); + } + sqlite3_free(p->zSelectLeaves); + sqlite3_free(p->aLeavesStmt); /* Invoke the tokenizer destructor to free the tokenizer. */ p->pTokenizer->pModule->xDestroy(p->pTokenizer); sqlite3_free(p); @@ -108537,11 +107895,11 @@ return SQLITE_OK; } /* ** Construct one or more SQL statements from the format string given -** and then evaluate those statements. The success code is written +** and then evaluate those statements. The success code is writting ** into *pRc. ** ** If *pRc is initially non-zero then this routine is a no-op. */ static void fts3DbExec( @@ -108589,42 +107947,37 @@ /* ** Invoke sqlite3_declare_vtab() to declare the schema for the FTS3 table ** passed as the first argument. This is done as part of the xConnect() ** and xCreate() methods. -** -** If *pRc is non-zero when this function is called, it is a no-op. -** Otherwise, if an error occurs, an SQLite error code is stored in *pRc -** before returning. */ -static void fts3DeclareVtab(int *pRc, Fts3Table *p){ - if( *pRc==SQLITE_OK ){ - int i; /* Iterator variable */ - int rc; /* Return code */ - char *zSql; /* SQL statement passed to declare_vtab() */ - char *zCols; /* List of user defined columns */ - - /* Create a list of user columns for the virtual table */ - zCols = sqlite3_mprintf("%Q, ", p->azColumn[0]); - for(i=1; zCols && inColumn; i++){ - zCols = sqlite3_mprintf("%z%Q, ", zCols, p->azColumn[i]); - } - - /* Create the whole "CREATE TABLE" statement to pass to SQLite */ - zSql = sqlite3_mprintf( - "CREATE TABLE x(%s %Q HIDDEN, docid HIDDEN)", zCols, p->zName - ); - if( !zCols || !zSql ){ - rc = SQLITE_NOMEM; - }else{ - rc = sqlite3_declare_vtab(p->db, zSql); - } - - sqlite3_free(zSql); - sqlite3_free(zCols); - *pRc = rc; - } +static int fts3DeclareVtab(Fts3Table *p){ + int i; /* Iterator variable */ + int rc; /* Return code */ + char *zSql; /* SQL statement passed to declare_vtab() */ + char *zCols; /* List of user defined columns */ + + /* Create a list of user columns for the virtual table */ + zCols = sqlite3_mprintf("%Q, ", p->azColumn[0]); + for(i=1; zCols && inColumn; i++){ + zCols = sqlite3_mprintf("%z%Q, ", zCols, p->azColumn[i]); + } + + /* Create the whole "CREATE TABLE" statement to pass to SQLite */ + zSql = sqlite3_mprintf( + "CREATE TABLE x(%s %Q HIDDEN, docid HIDDEN)", zCols, p->zName + ); + + if( !zCols || !zSql ){ + rc = SQLITE_NOMEM; + }else{ + rc = sqlite3_declare_vtab(p->db, zSql); + } + + sqlite3_free(zSql); + sqlite3_free(zCols); + return rc; } /* ** Create the backing store tables (%_content, %_segments and %_segdir) ** required by the FTS3 table passed as the only argument. 
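
At the SQL level those shadow tables appear as a side effect of creating the virtual table. A brief sketch, assuming an open handle db and an invented table name pages (additional %_docsize / %_stat tables may also be created depending on the module version and flags):

int rc = sqlite3_exec(db,
    "CREATE VIRTUAL TABLE pages USING fts3(title, body);",
    0, 0, 0);
/* On success the schema also contains the backing tables pages_content,
** pages_segments and pages_segdir, created by fts3CreateTables() while
** running the vtab xCreate() method. */
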
This is done @@ -108639,23 +107992,25 @@ int i; /* Iterator variable */ char *zContentCols; /* Columns of %_content table */ sqlite3 *db = p->db; /* The database connection */ /* Create a list of user columns for the content table */ - zContentCols = sqlite3_mprintf("docid INTEGER PRIMARY KEY"); - for(i=0; zContentCols && inColumn; i++){ - char *z = p->azColumn[i]; - zContentCols = sqlite3_mprintf("%z, 'c%d%q'", zContentCols, i, z); - } - if( zContentCols==0 ) rc = SQLITE_NOMEM; - - /* Create the content table */ - fts3DbExec(&rc, db, - "CREATE TABLE %Q.'%q_content'(%s)", - p->zDb, p->zName, zContentCols - ); - sqlite3_free(zContentCols); + if( p->bHasContent ){ + zContentCols = sqlite3_mprintf("docid INTEGER PRIMARY KEY"); + for(i=0; zContentCols && inColumn; i++){ + char *z = p->azColumn[i]; + zContentCols = sqlite3_mprintf("%z, 'c%d%q'", zContentCols, i, z); + } + if( zContentCols==0 ) rc = SQLITE_NOMEM; + + /* Create the content table */ + fts3DbExec(&rc, db, + "CREATE TABLE %Q.'%q_content'(%s)", + p->zDb, p->zName, zContentCols + ); + sqlite3_free(zContentCols); + } /* Create other tables */ fts3DbExec(&rc, db, "CREATE TABLE %Q.'%q_segments'(blockid INTEGER PRIMARY KEY, block BLOB);", p->zDb, p->zName ); @@ -108674,12 +108029,10 @@ if( p->bHasDocsize ){ fts3DbExec(&rc, db, "CREATE TABLE %Q.'%q_docsize'(docid INTEGER PRIMARY KEY, size BLOB);", p->zDb, p->zName ); - } - if( p->bHasStat ){ fts3DbExec(&rc, db, "CREATE TABLE %Q.'%q_stat'(id INTEGER PRIMARY KEY, value BLOB);", p->zDb, p->zName ); } @@ -108720,70 +108073,10 @@ sqlite3_free(zSql); *pResult = (u8)(res & 0xff); if( rc!=SQLITE_ABORT ) *pRc = rc; } -/* -** Store the current database page-size in bytes in p->nPgsz. -** -** If *pRc is non-zero when this function is called, it is a no-op. -** Otherwise, if an error occurs, an SQLite error code is stored in *pRc -** before returning. -*/ -static void fts3DatabasePageSize(int *pRc, Fts3Table *p){ - if( *pRc==SQLITE_OK ){ - int rc; /* Return code */ - char *zSql; /* SQL text "PRAGMA %Q.page_size" */ - sqlite3_stmt *pStmt; /* Compiled "PRAGMA %Q.page_size" statement */ - - zSql = sqlite3_mprintf("PRAGMA %Q.page_size", p->zDb); - if( !zSql ){ - rc = SQLITE_NOMEM; - }else{ - rc = sqlite3_prepare(p->db, zSql, -1, &pStmt, 0); - if( rc==SQLITE_OK ){ - sqlite3_step(pStmt); - p->nPgsz = sqlite3_column_int(pStmt, 0); - rc = sqlite3_finalize(pStmt); - } - } - assert( p->nPgsz>0 || rc!=SQLITE_OK ); - sqlite3_free(zSql); - *pRc = rc; - } -} - -/* -** "Special" FTS4 arguments are column specifications of the following form: -** -** = -** -** There may not be whitespace surrounding the "=" character. The -** term may be quoted, but the may not. -*/ -static int fts3IsSpecialColumn( - const char *z, - int *pnKey, - char **pzValue -){ - char *zValue; - const char *zCsr = z; - - while( *zCsr!='=' ){ - if( *zCsr=='\0' ) return 0; - zCsr++; - } - - *pnKey = zCsr-z; - zValue = sqlite3_mprintf("%s", &zCsr[1]); - if( zValue ){ - sqlite3Fts3Dequote(zValue); - } - *pzValue = zValue; - return 1; -} - /* ** This function is the implementation of both the xConnect and xCreate ** methods of the FTS3 virtual table. 
** ** The argv[] array contains the following: @@ -108801,103 +108094,48 @@ const char * const *argv, /* xCreate/xConnect argument array */ sqlite3_vtab **ppVTab, /* Write the resulting vtab structure here */ char **pzErr /* Write any error message here */ ){ Fts3Hash *pHash = (Fts3Hash *)pAux; - Fts3Table *p = 0; /* Pointer to allocated vtab */ - int rc = SQLITE_OK; /* Return code */ + Fts3Table *p; /* Pointer to allocated vtab */ + int rc; /* Return code */ int i; /* Iterator variable */ int nByte; /* Size of allocation used for *p */ int iCol; /* Column index */ int nString = 0; /* Bytes required to hold all column names */ int nCol = 0; /* Number of columns in the FTS table */ char *zCsr; /* Space for holding column names */ int nDb; /* Bytes required to hold database name */ int nName; /* Bytes required to hold table name */ - int isFts4 = (argv[0][3]=='4'); /* True for FTS4, false for FTS3 */ - int bNoDocsize = 0; /* True to omit %_docsize table */ - const char **aCol; /* Array of column names */ + + const char *zTokenizer = 0; /* Name of tokenizer to use */ sqlite3_tokenizer *pTokenizer = 0; /* Tokenizer for this table */ - assert( strlen(argv[0])==4 ); - assert( (sqlite3_strnicmp(argv[0], "fts4", 4)==0 && isFts4) - || (sqlite3_strnicmp(argv[0], "fts3", 4)==0 && !isFts4) - ); - nDb = (int)strlen(argv[1]) + 1; nName = (int)strlen(argv[2]) + 1; - - aCol = (const char **)sqlite3_malloc(sizeof(const char *) * (argc-2) ); - if( !aCol ) return SQLITE_NOMEM; - memset(aCol, 0, sizeof(const char *) * (argc-2)); - - /* Loop through all of the arguments passed by the user to the FTS3/4 - ** module (i.e. all the column names and special arguments). This loop - ** does the following: - ** - ** + Figures out the number of columns the FTSX table will have, and - ** the number of bytes of space that must be allocated to store copies - ** of the column names. - ** - ** + If there is a tokenizer specification included in the arguments, - ** initializes the tokenizer pTokenizer. - */ - for(i=3; rc==SQLITE_OK && i8 - && 0==sqlite3_strnicmp(z, "tokenize", 8) - && 0==sqlite3Fts3IsIdChar(z[8]) - ){ - rc = sqlite3Fts3InitTokenizer(pHash, &z[9], &pTokenizer, pzErr); - } - - /* Check if it is an FTS4 special argument. */ - else if( isFts4 && fts3IsSpecialColumn(z, &nKey, &zVal) ){ - if( !zVal ){ - rc = SQLITE_NOMEM; - goto fts3_init_out; - } - if( nKey==9 && 0==sqlite3_strnicmp(z, "matchinfo", 9) ){ - if( strlen(zVal)==4 && 0==sqlite3_strnicmp(zVal, "fts3", 4) ){ - bNoDocsize = 1; - }else{ - *pzErr = sqlite3_mprintf("unrecognized matchinfo: %s", zVal); - rc = SQLITE_ERROR; - } - }else{ - *pzErr = sqlite3_mprintf("unrecognized parameter: %s", z); - rc = SQLITE_ERROR; - } - sqlite3_free(zVal); - } - - /* Otherwise, the argument is a column name. */ - else { + rc = sqlite3Fts3InitTokenizer(pHash, z, &pTokenizer, &zTokenizer, pzErr); + if( rc!=SQLITE_OK ){ + return rc; + } + if( z!=zTokenizer ){ nString += (int)(strlen(z) + 1); - aCol[nCol++] = z; } } - if( rc!=SQLITE_OK ) goto fts3_init_out; + nCol = argc - 3 - (zTokenizer!=0); + if( zTokenizer==0 ){ + rc = sqlite3Fts3InitTokenizer(pHash, 0, &pTokenizer, 0, pzErr); + if( rc!=SQLITE_OK ){ + return rc; + } + assert( pTokenizer ); + } if( nCol==0 ){ - assert( nString==0 ); - aCol[0] = "content"; - nString = 8; nCol = 1; } - - if( pTokenizer==0 ){ - rc = sqlite3Fts3InitTokenizer(pHash, "simple", &pTokenizer, pzErr); - if( rc!=SQLITE_OK ) goto fts3_init_out; - } - assert( pTokenizer ); - /* Allocate and populate the Fts3Table structure. 
*/ nByte = sizeof(Fts3Table) + /* Fts3Table */ nCol * sizeof(char *) + /* azColumn */ nName + /* zName */ @@ -108907,70 +108145,77 @@ if( p==0 ){ rc = SQLITE_NOMEM; goto fts3_init_out; } memset(p, 0, nByte); + p->db = db; p->nColumn = nCol; p->nPendingData = 0; p->azColumn = (char **)&p[1]; p->pTokenizer = pTokenizer; p->nNodeSize = 1000; p->nMaxPendingData = FTS3_MAX_PENDING_DATA; - p->bHasDocsize = (isFts4 && bNoDocsize==0); - p->bHasStat = isFts4; + zCsr = (char *)&p->azColumn[nCol]; + fts3HashInit(&p->pendingTerms, FTS3_HASH_STRING, 1); /* Fill in the zName and zDb fields of the vtab structure. */ - zCsr = (char *)&p->azColumn[nCol]; p->zName = zCsr; memcpy(zCsr, argv[2], nName); zCsr += nName; p->zDb = zCsr; memcpy(zCsr, argv[1], nDb); zCsr += nDb; /* Fill in the azColumn array */ - for(iCol=0; iColazColumn[iCol] = zCsr; - zCsr += n+1; - assert( zCsr <= &((char *)p)[nByte] ); + iCol = 0; + for(i=3; iazColumn[iCol++] = zCsr; + zCsr += n+1; + assert( zCsr <= &((char *)p)[nByte] ); + } + } + if( iCol==0 ){ + assert( nCol==1 ); + p->azColumn[0] = "content"; } /* If this is an xCreate call, create the underlying tables in the ** database. TODO: For xConnect(), it could verify that said tables exist. */ if( isCreate ){ + p->bHasContent = 1; + p->bHasDocsize = argv[0][3]=='4'; rc = fts3CreateTables(p); - } - - /* Figure out the page-size for the database. This is required in order to - ** estimate the cost of loading large doclists from the database (see - ** function sqlite3Fts3SegReaderCost() for details). - */ - fts3DatabasePageSize(&rc, p); - - /* Declare the table schema to SQLite. */ - fts3DeclareVtab(&rc, p); + }else{ + rc = SQLITE_OK; + fts3TableExists(&rc, db, argv[1], argv[2], "_content", &p->bHasContent); + fts3TableExists(&rc, db, argv[1], argv[2], "_docsize", &p->bHasDocsize); + } + if( rc!=SQLITE_OK ) goto fts3_init_out; + + rc = fts3DeclareVtab(p); + if( rc!=SQLITE_OK ) goto fts3_init_out; + + *ppVTab = &p->base; fts3_init_out: - - sqlite3_free(aCol); + assert( p || (pTokenizer && rc!=SQLITE_OK) ); if( rc!=SQLITE_OK ){ if( p ){ fts3DisconnectMethod((sqlite3_vtab *)p); - }else if( pTokenizer ){ + }else{ pTokenizer->pModule->xDestroy(pTokenizer); } - }else{ - *ppVTab = &p->base; } return rc; } /* @@ -109078,16 +108323,14 @@ /* ** Close the cursor. For additional information see the documentation ** on the xClose method of the virtual table interface. */ -static int fts3CloseMethod(sqlite3_vtab_cursor *pCursor){ +static int fulltextClose(sqlite3_vtab_cursor *pCursor){ Fts3Cursor *pCsr = (Fts3Cursor *)pCursor; - assert( ((Fts3Table *)pCsr->base.pVtab)->pSegments==0 ); sqlite3_finalize(pCsr->pStmt); sqlite3Fts3ExprFree(pCsr->pExpr); - sqlite3Fts3FreeDeferredTokens(pCsr); sqlite3_free(pCsr->aDoclist); sqlite3_free(pCsr->aMatchinfo); sqlite3_free(pCsr); return SQLITE_OK; } @@ -109122,186 +108365,134 @@ return SQLITE_OK; } } /* -** This function is used to process a single interior node when searching -** a b-tree for a term or term prefix. The node data is passed to this -** function via the zNode/nNode parameters. The term to search for is -** passed in zTerm/nTerm. -** -** If piFirst is not NULL, then this function sets *piFirst to the blockid -** of the child node that heads the sub-tree that may contain the term. -** -** If piLast is not NULL, then *piLast is set to the right-most child node -** that heads a sub-tree that may contain a term for which zTerm/nTerm is -** a prefix. -** -** If an OOM error occurs, SQLITE_NOMEM is returned. Otherwise, SQLITE_OK. 
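
The terms walked over by the interior-node readers in this file are prefix-compressed: each term is stored as the number of bytes it shares with the previous term plus the remaining suffix. A self-contained sketch of that reconstruction step, with an invented helper name and fixed buffer (the real code grows its buffer with sqlite3_realloc() as needed):

#include <string.h>

static int applyPrefixDelta(
  char *zPrev,            /* IN/OUT: previous term, rebuilt in place */
  int nPrefix,            /* bytes shared with the previous term */
  const char *zSuffix,    /* the new bytes */
  int nSuffix             /* number of new bytes */
){
  memcpy(&zPrev[nPrefix], zSuffix, nSuffix);
  return nPrefix + nSuffix;         /* length of the reconstructed term */
}

/* Example: with zTerm[16] holding "linear", applyPrefixDelta(zTerm, 3, "ux", 2)
** returns 5 and leaves "linux" in the first 5 bytes of zTerm (terms are
** length-delimited, not NUL-terminated). */
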
-*/ -static int fts3ScanInteriorNode( - Fts3Table *p, /* Virtual table handle */ - const char *zTerm, /* Term to select leaves for */ - int nTerm, /* Size of term zTerm in bytes */ - const char *zNode, /* Buffer containing segment interior node */ - int nNode, /* Size of buffer at zNode */ - sqlite3_int64 *piFirst, /* OUT: Selected child node */ - sqlite3_int64 *piLast /* OUT: Selected child node */ -){ +** Advance the cursor to the next row in the %_content table that +** matches the search criteria. For a MATCH search, this will be +** the next row that matches. For a full-table scan, this will be +** simply the next row in the %_content table. For a docid lookup, +** this routine simply sets the EOF flag. +** +** Return SQLITE_OK if nothing goes wrong. SQLITE_OK is returned +** even if we reach end-of-file. The fts3EofMethod() will be called +** subsequently to determine whether or not an EOF was hit. +*/ +static int fts3NextMethod(sqlite3_vtab_cursor *pCursor){ int rc = SQLITE_OK; /* Return code */ - const char *zCsr = zNode; /* Cursor to iterate through node */ - const char *zEnd = &zCsr[nNode];/* End of interior node buffer */ - char *zBuffer = 0; /* Buffer to load terms into */ - int nAlloc = 0; /* Size of allocated buffer */ - int isFirstTerm = 1; /* True when processing first term on page */ - sqlite3_int64 iChild; /* Block id of child node to descend to */ - - /* Skip over the 'height' varint that occurs at the start of every - ** interior node. Then load the blockid of the left-child of the b-tree - ** node into variable iChild. - ** - ** Even if the data structure on disk is corrupted, this (reading two - ** varints from the buffer) does not risk an overread. If zNode is a - ** root node, then the buffer comes from a SELECT statement. SQLite does - ** not make this guarantee explicitly, but in practice there are always - ** either more than 20 bytes of allocated space following the nNode bytes of - ** contents, or two zero bytes. Or, if the node is read from the %_segments - ** table, then there are always 20 bytes of zeroed padding following the - ** nNode bytes of content (see sqlite3Fts3ReadBlock() for details). - */ - zCsr += sqlite3Fts3GetVarint(zCsr, &iChild); - zCsr += sqlite3Fts3GetVarint(zCsr, &iChild); - if( zCsr>=zEnd ){ - return SQLITE_CORRUPT; - } - - while( zCsrzEnd ){ - rc = SQLITE_CORRUPT; - goto finish_scan; - } - if( nPrefix+nSuffix>nAlloc ){ - char *zNew; - nAlloc = (nPrefix+nSuffix) * 2; - zNew = (char *)sqlite3_realloc(zBuffer, nAlloc); - if( !zNew ){ - rc = SQLITE_NOMEM; - goto finish_scan; - } - zBuffer = zNew; - } - memcpy(&zBuffer[nPrefix], zCsr, nSuffix); - nBuffer = nPrefix + nSuffix; - zCsr += nSuffix; - - /* Compare the term we are searching for with the term just loaded from - ** the interior node. If the specified term is greater than or equal - ** to the term from the interior node, then all terms on the sub-tree - ** headed by node iChild are smaller than zTerm. No need to search - ** iChild. - ** - ** If the interior node term is larger than the specified term, then - ** the tree headed by iChild may contain the specified term. - */ - cmp = memcmp(zTerm, zBuffer, (nBuffer>nTerm ? 
nTerm : nBuffer)); - if( piFirst && (cmp<0 || (cmp==0 && nBuffer>nTerm)) ){ - *piFirst = iChild; - piFirst = 0; - } - - if( piLast && cmp<0 ){ - *piLast = iChild; - piLast = 0; - } - - iChild++; - }; - - if( piFirst ) *piFirst = iChild; - if( piLast ) *piLast = iChild; - - finish_scan: - sqlite3_free(zBuffer); - return rc; -} - - -/* -** The buffer pointed to by argument zNode (size nNode bytes) contains an -** interior node of a b-tree segment. The zTerm buffer (size nTerm bytes) -** contains a term. This function searches the sub-tree headed by the zNode -** node for the range of leaf nodes that may contain the specified term -** or terms for which the specified term is a prefix. -** -** If piLeaf is not NULL, then *piLeaf is set to the blockid of the -** left-most leaf node in the tree that may contain the specified term. -** If piLeaf2 is not NULL, then *piLeaf2 is set to the blockid of the -** right-most leaf node that may contain a term for which the specified -** term is a prefix. -** -** It is possible that the range of returned leaf nodes does not contain -** the specified term or any terms for which it is a prefix. However, if the -** segment does contain any such terms, they are stored within the identified -** range. Because this function only inspects interior segment nodes (and -** never loads leaf nodes into memory), it is not possible to be sure. + Fts3Cursor *pCsr = (Fts3Cursor *)pCursor; + + if( pCsr->aDoclist==0 ){ + if( SQLITE_ROW!=sqlite3_step(pCsr->pStmt) ){ + pCsr->isEof = 1; + rc = sqlite3_reset(pCsr->pStmt); + } + }else if( pCsr->pNextId>=&pCsr->aDoclist[pCsr->nDoclist] ){ + pCsr->isEof = 1; + }else{ + sqlite3_reset(pCsr->pStmt); + fts3GetDeltaVarint(&pCsr->pNextId, &pCsr->iPrevId); + pCsr->isRequireSeek = 1; + pCsr->isMatchinfoNeeded = 1; + } + return rc; +} + + +/* +** The buffer pointed to by argument zNode (size nNode bytes) contains the +** root node of a b-tree segment. The segment is guaranteed to be at least +** one level high (i.e. the root node is not also a leaf). If successful, +** this function locates the leaf node of the segment that may contain the +** term specified by arguments zTerm and nTerm and writes its block number +** to *piLeaf. +** +** It is possible that the returned leaf node does not contain the specified +** term. However, if the segment does contain said term, it is stored on +** the identified leaf node. Because this function only inspects interior +** segment nodes (and never loads leaf nodes into memory), it is not possible +** to be sure. ** ** If an error occurs, an error code other than SQLITE_OK is returned. 
*/ static int fts3SelectLeaf( Fts3Table *p, /* Virtual table handle */ const char *zTerm, /* Term to select leaves for */ int nTerm, /* Size of term zTerm in bytes */ const char *zNode, /* Buffer containing segment interior node */ int nNode, /* Size of buffer at zNode */ - sqlite3_int64 *piLeaf, /* Selected leaf node */ - sqlite3_int64 *piLeaf2 /* Selected leaf node */ -){ - int rc; /* Return code */ - int iHeight; /* Height of this node in tree */ - - assert( piLeaf || piLeaf2 ); - - sqlite3Fts3GetVarint32(zNode, &iHeight); - rc = fts3ScanInteriorNode(p, zTerm, nTerm, zNode, nNode, piLeaf, piLeaf2); - assert( !piLeaf2 || !piLeaf || rc!=SQLITE_OK || (*piLeaf<=*piLeaf2) ); - - if( rc==SQLITE_OK && iHeight>1 ){ - char *zBlob = 0; /* Blob read from %_segments table */ - int nBlob; /* Size of zBlob in bytes */ - - if( piLeaf && piLeaf2 && (*piLeaf!=*piLeaf2) ){ - rc = sqlite3Fts3ReadBlock(p, *piLeaf, &zBlob, &nBlob); - if( rc==SQLITE_OK ){ - rc = fts3SelectLeaf(p, zTerm, nTerm, zBlob, nBlob, piLeaf, 0); - } - sqlite3_free(zBlob); - piLeaf = 0; - zBlob = 0; - } - - if( rc==SQLITE_OK ){ - rc = sqlite3Fts3ReadBlock(p, piLeaf ? *piLeaf : *piLeaf2, &zBlob, &nBlob); - } - if( rc==SQLITE_OK ){ - rc = fts3SelectLeaf(p, zTerm, nTerm, zBlob, nBlob, piLeaf, piLeaf2); - } - sqlite3_free(zBlob); - } - + sqlite3_int64 *piLeaf /* Selected leaf node */ +){ + int rc = SQLITE_OK; /* Return code */ + const char *zCsr = zNode; /* Cursor to iterate through node */ + const char *zEnd = &zCsr[nNode];/* End of interior node buffer */ + char *zBuffer = 0; /* Buffer to load terms into */ + int nAlloc = 0; /* Size of allocated buffer */ + + while( 1 ){ + int isFirstTerm = 1; /* True when processing first term on page */ + int iHeight; /* Height of this node in tree */ + sqlite3_int64 iChild; /* Block id of child node to descend to */ + int nBlock; /* Size of child node in bytes */ + + zCsr += sqlite3Fts3GetVarint32(zCsr, &iHeight); + zCsr += sqlite3Fts3GetVarint(zCsr, &iChild); + + while( zCsrnAlloc ){ + char *zNew; + nAlloc = (nPrefix+nSuffix) * 2; + zNew = (char *)sqlite3_realloc(zBuffer, nAlloc); + if( !zNew ){ + sqlite3_free(zBuffer); + return SQLITE_NOMEM; + } + zBuffer = zNew; + } + memcpy(&zBuffer[nPrefix], zCsr, nSuffix); + nBuffer = nPrefix + nSuffix; + zCsr += nSuffix; + + /* Compare the term we are searching for with the term just loaded from + ** the interior node. If the specified term is greater than or equal + ** to the term from the interior node, then all terms on the sub-tree + ** headed by node iChild are smaller than zTerm. No need to search + ** iChild. + ** + ** If the interior node term is larger than the specified term, then + ** the tree headed by iChild may contain the specified term. + */ + cmp = memcmp(zTerm, zBuffer, (nBuffer>nTerm ? nTerm : nBuffer)); + if( cmp<0 || (cmp==0 && nBuffer>nTerm) ) break; + iChild++; + }; + + /* If (iHeight==1), the children of this interior node are leaves. The + ** specified term may be present on leaf node iChild. + */ + if( iHeight==1 ){ + *piLeaf = iChild; + break; + } + + /* Descend to interior node iChild. */ + rc = sqlite3Fts3ReadBlock(p, iChild, &zCsr, &nBlock); + if( rc!=SQLITE_OK ) break; + zEnd = &zCsr[nBlock]; + } + sqlite3_free(zBuffer); return rc; } /* ** This function is used to create delta-encoded serialized lists of FTS3 @@ -109529,48 +108720,24 @@ *pp2 = p2 + 1; } /* ** nToken==1 searches for adjacent positions. -** -** This function is used to merge two position lists into one. 
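/* [Editorial sketch -- not part of this diff or of the SQLite sources.
** The doclists built and merged in this part of the file store ascending
** values (docids, and the token positions embedded after each docid) as
** deltas from the previous value; the real format additionally encodes each
** delta as a varint and uses marker/terminator bytes. The minimal,
** hypothetical helpers below show just the delta step. */
#include <stddef.h>

typedef long long sketch_i64;

/* Turn a sorted id list into a list of deltas. Returns the count. */
static size_t sketchDeltaEncode(const sketch_i64 *aId, size_t n,
                                sketch_i64 *aDelta){
  size_t i;
  sketch_i64 iPrev = 0;
  for(i=0; i<n; i++){
    aDelta[i] = aId[i] - iPrev;     /* always >=0 for sorted input */
    iPrev = aId[i];
  }
  return n;
}

/* Reverse the transformation: accumulate deltas back into absolute ids. */
static size_t sketchDeltaDecode(const sketch_i64 *aDelta, size_t n,
                                sketch_i64 *aId){
  size_t i;
  sketch_i64 iPrev = 0;
  for(i=0; i<n; i++){
    iPrev += aDelta[i];
    aId[i] = iPrev;
  }
  return n;
}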
When it is -** called, *pp1 and *pp2 must both point to position lists. A position-list is -** the part of a doclist that follows each document id. For example, if a row -** contains: -** -** 'a b c'|'x y z'|'a b b a' -** -** Then the position list for this row for token 'b' would consist of: -** -** 0x02 0x01 0x02 0x03 0x03 0x00 -** -** When this function returns, both *pp1 and *pp2 are left pointing to the -** byte following the 0x00 terminator of their respective position lists. -** -** If isSaveLeft is 0, an entry is added to the output position list for -** each position in *pp2 for which there exists one or more positions in -** *pp1 so that (pos(*pp2)>pos(*pp1) && pos(*pp2)-pos(*pp1)<=nToken). i.e. -** when the *pp1 token appears before the *pp2 token, but not more than nToken -** slots before it. */ static int fts3PoslistPhraseMerge( - char **pp, /* IN/OUT: Preallocated output buffer */ + char **pp, /* Output buffer */ int nToken, /* Maximum difference in token positions */ int isSaveLeft, /* Save the left position */ - int isExact, /* If *pp1 is exactly nTokens before *pp2 */ - char **pp1, /* IN/OUT: Left input list */ - char **pp2 /* IN/OUT: Right input list */ + char **pp1, /* Left input list */ + char **pp2 /* Right input list */ ){ char *p = (pp ? *pp : 0); char *p1 = *pp1; char *p2 = *pp2; + int iCol1 = 0; int iCol2 = 0; - - /* Never set both isSaveLeft and isExact for the same invocation. */ - assert( isSaveLeft==0 || isExact==0 ); - assert( *p1!=0 && *p2!=0 ); if( *p1==POS_COLUMN ){ p1++; p1 += sqlite3Fts3GetVarint32(p1, &iCol1); } @@ -109595,13 +108762,11 @@ assert( *p2!=POS_END && *p2!=POS_COLUMN ); fts3GetDeltaVarint(&p1, &iPos1); iPos1 -= 2; fts3GetDeltaVarint(&p2, &iPos2); iPos2 -= 2; while( 1 ){ - if( iPos2==iPos1+nToken - || (isExact==0 && iPos2>iPos1 && iPos2<=iPos1+nToken) - ){ + if( iPos2>iPos1 && iPos2<=iPos1+nToken ){ sqlite3_int64 iSave; if( !pp ){ fts3PoslistCopy(0, &p2); fts3PoslistCopy(0, &p1); *pp1 = p1; @@ -109680,25 +108845,25 @@ ){ char *p1 = *pp1; char *p2 = *pp2; if( !pp ){ - if( fts3PoslistPhraseMerge(0, nRight, 0, 0, pp1, pp2) ) return 1; + if( fts3PoslistPhraseMerge(0, nRight, 0, pp1, pp2) ) return 1; *pp1 = p1; *pp2 = p2; - return fts3PoslistPhraseMerge(0, nLeft, 0, 0, pp2, pp1); + return fts3PoslistPhraseMerge(0, nLeft, 0, pp2, pp1); }else{ char *pTmp1 = aTmp; char *pTmp2; char *aTmp2; int res = 1; - fts3PoslistPhraseMerge(&pTmp1, nRight, 0, 0, pp1, pp2); + fts3PoslistPhraseMerge(&pTmp1, nRight, 0, pp1, pp2); aTmp2 = pTmp2 = pTmp1; *pp1 = p1; *pp2 = p2; - fts3PoslistPhraseMerge(&pTmp2, nLeft, 1, 0, pp2, pp1); + fts3PoslistPhraseMerge(&pTmp2, nLeft, 1, pp2, pp1); if( pTmp1!=aTmp && pTmp2!=aTmp2 ){ fts3PoslistMerge(pp, &aTmp, &aTmp2); }else if( pTmp1!=aTmp ){ fts3PoslistCopy(pp, &aTmp); }else if( pTmp2!=aTmp2 ){ @@ -109740,12 +108905,11 @@ char *aBuffer, /* Pre-allocated output buffer */ int *pnBuffer, /* OUT: Bytes written to aBuffer */ char *a1, /* Buffer containing first doclist */ int n1, /* Size of buffer a1 */ char *a2, /* Buffer containing second doclist */ - int n2, /* Size of buffer a2 */ - int *pnDoc /* OUT: Number of docids in output */ + int n2 /* Size of buffer a2 */ ){ sqlite3_int64 i1 = 0; sqlite3_int64 i2 = 0; sqlite3_int64 iPrev = 0; @@ -109752,11 +108916,10 @@ char *p = aBuffer; char *p1 = a1; char *p2 = a2; char *pEnd1 = &a1[n1]; char *pEnd2 = &a2[n2]; - int nDoc = 0; assert( mergetype==MERGE_OR || mergetype==MERGE_POS_OR || mergetype==MERGE_AND || mergetype==MERGE_NOT || mergetype==MERGE_PHRASE || mergetype==MERGE_POS_PHRASE || 
mergetype==MERGE_NEAR || mergetype==MERGE_POS_NEAR @@ -109796,11 +108959,10 @@ while( p1 && p2 ){ if( i1==i2 ){ fts3PutDeltaVarint(&p, &iPrev, i1); fts3GetDeltaVarint2(&p1, pEnd1, &i1); fts3GetDeltaVarint2(&p2, pEnd2, &i2); - nDoc++; }else if( i1aaOutput); i++){ if( pTS->aaOutput[i] ){ if( !aOut ){ aOut = pTS->aaOutput[i]; nOut = pTS->anOutput[i]; - pTS->aaOutput[i] = 0; + pTS->aaOutput[0] = 0; }else{ int nNew = nOut + pTS->anOutput[i]; char *aNew = sqlite3_malloc(nNew); if( !aNew ){ sqlite3_free(aOut); return SQLITE_NOMEM; } fts3DoclistMerge(mergetype, 0, 0, - aNew, &nNew, pTS->aaOutput[i], pTS->anOutput[i], aOut, nOut, 0 + aNew, &nNew, pTS->aaOutput[i], pTS->anOutput[i], aOut, nOut ); sqlite3_free(pTS->aaOutput[i]); sqlite3_free(aOut); pTS->aaOutput[i] = 0; aOut = aNew; @@ -110003,12 +109162,12 @@ if( aMerge!=aDoclist ){ sqlite3_free(aMerge); } return SQLITE_NOMEM; } - fts3DoclistMerge(mergetype, 0, 0, aNew, &nNew, - pTS->aaOutput[iOut], pTS->anOutput[iOut], aMerge, nMerge, 0 + fts3DoclistMerge(mergetype, 0, 0, + aNew, &nNew, pTS->aaOutput[iOut], pTS->anOutput[iOut], aMerge, nMerge ); if( iOut>0 ) sqlite3_free(aMerge); sqlite3_free(pTS->aaOutput[iOut]); pTS->aaOutput[iOut] = 0; @@ -110022,165 +109181,10 @@ } } return SQLITE_OK; } -static int fts3DeferredTermSelect( - Fts3DeferredToken *pToken, /* Phrase token */ - int isTermPos, /* True to include positions */ - int *pnOut, /* OUT: Size of list */ - char **ppOut /* OUT: Body of list */ -){ - char *aSource; - int nSource; - - aSource = sqlite3Fts3DeferredDoclist(pToken, &nSource); - if( !aSource ){ - *pnOut = 0; - *ppOut = 0; - }else if( isTermPos ){ - *ppOut = sqlite3_malloc(nSource); - if( !*ppOut ) return SQLITE_NOMEM; - memcpy(*ppOut, aSource, nSource); - *pnOut = nSource; - }else{ - sqlite3_int64 docid; - *pnOut = sqlite3Fts3GetVarint(aSource, &docid); - *ppOut = sqlite3_malloc(*pnOut); - if( !*ppOut ) return SQLITE_NOMEM; - sqlite3Fts3PutVarint(*ppOut, docid); - } - - return SQLITE_OK; -} - -/* -** An Fts3SegReaderArray is used to store an array of Fts3SegReader objects. -** Elements are added to the array using fts3SegReaderArrayAdd(). -*/ -struct Fts3SegReaderArray { - int nSegment; /* Number of valid entries in apSegment[] */ - int nAlloc; /* Allocated size of apSegment[] */ - int nCost; /* The cost of executing SegReaderIterate() */ - Fts3SegReader *apSegment[1]; /* Array of seg-reader objects */ -}; - - -/* -** Free an Fts3SegReaderArray object. Also free all seg-readers in the -** array (using sqlite3Fts3SegReaderFree()). -*/ -static void fts3SegReaderArrayFree(Fts3SegReaderArray *pArray){ - if( pArray ){ - int i; - for(i=0; inSegment; i++){ - sqlite3Fts3SegReaderFree(0, pArray->apSegment[i]); - } - sqlite3_free(pArray); - } -} - -static int fts3SegReaderArrayAdd( - Fts3SegReaderArray **ppArray, - Fts3SegReader *pNew -){ - Fts3SegReaderArray *pArray = *ppArray; - - if( !pArray || pArray->nAlloc==pArray->nSegment ){ - int nNew = (pArray ? 
pArray->nAlloc+16 : 16); - pArray = (Fts3SegReaderArray *)sqlite3_realloc(pArray, - sizeof(Fts3SegReaderArray) + (nNew-1) * sizeof(Fts3SegReader*) - ); - if( !pArray ){ - sqlite3Fts3SegReaderFree(0, pNew); - return SQLITE_NOMEM; - } - if( nNew==16 ){ - pArray->nSegment = 0; - pArray->nCost = 0; - } - pArray->nAlloc = nNew; - *ppArray = pArray; - } - - pArray->apSegment[pArray->nSegment++] = pNew; - return SQLITE_OK; -} - -static int fts3TermSegReaderArray( - Fts3Cursor *pCsr, /* Virtual table cursor handle */ - const char *zTerm, /* Term to query for */ - int nTerm, /* Size of zTerm in bytes */ - int isPrefix, /* True for a prefix search */ - Fts3SegReaderArray **ppArray /* OUT: Allocated seg-reader array */ -){ - Fts3Table *p = (Fts3Table *)pCsr->base.pVtab; - int rc; /* Return code */ - Fts3SegReaderArray *pArray = 0; /* Array object to build */ - Fts3SegReader *pReader = 0; /* Seg-reader to add to pArray */ - sqlite3_stmt *pStmt = 0; /* SQL statement to scan %_segdir table */ - int iAge = 0; /* Used to assign ages to segments */ - - /* Allocate a seg-reader to scan the pending terms, if any. */ - rc = sqlite3Fts3SegReaderPending(p, zTerm, nTerm, isPrefix, &pReader); - if( rc==SQLITE_OK && pReader ) { - rc = fts3SegReaderArrayAdd(&pArray, pReader); - } - - /* Loop through the entire %_segdir table. For each segment, create a - ** Fts3SegReader to iterate through the subset of the segment leaves - ** that may contain a term that matches zTerm/nTerm. For non-prefix - ** searches, this is always a single leaf. For prefix searches, this - ** may be a contiguous block of leaves. - */ - if( rc==SQLITE_OK ){ - rc = sqlite3Fts3AllSegdirs(p, &pStmt); - } - while( rc==SQLITE_OK && SQLITE_ROW==(rc = sqlite3_step(pStmt)) ){ - Fts3SegReader *pNew = 0; - int nRoot = sqlite3_column_bytes(pStmt, 4); - char const *zRoot = sqlite3_column_blob(pStmt, 4); - if( sqlite3_column_int64(pStmt, 1)==0 ){ - /* The entire segment is stored on the root node (which must be a - ** leaf). Do not bother inspecting any data in this case, just - ** create a Fts3SegReader to scan the single leaf. - */ - rc = sqlite3Fts3SegReaderNew(p, iAge, 0, 0, 0, zRoot, nRoot, &pNew); - }else{ - sqlite3_int64 i1; /* First leaf that may contain zTerm */ - sqlite3_int64 i2; /* Final leaf that may contain zTerm */ - rc = fts3SelectLeaf(p, zTerm, nTerm, zRoot, nRoot, &i1, (isPrefix?&i2:0)); - if( isPrefix==0 ) i2 = i1; - if( rc==SQLITE_OK ){ - rc = sqlite3Fts3SegReaderNew(p, iAge, i1, i2, 0, 0, 0, &pNew); - } - } - assert( (pNew==0)==(rc!=SQLITE_OK) ); - - /* If a new Fts3SegReader was allocated, add it to the array. */ - if( rc==SQLITE_OK ){ - rc = fts3SegReaderArrayAdd(&pArray, pNew); - } - if( rc==SQLITE_OK ){ - rc = sqlite3Fts3SegReaderCost(pCsr, pNew, &pArray->nCost); - } - iAge++; - } - - if( rc==SQLITE_DONE ){ - rc = sqlite3_reset(pStmt); - }else{ - sqlite3_reset(pStmt); - } - if( rc!=SQLITE_OK ){ - fts3SegReaderArrayFree(pArray); - pArray = 0; - } - *ppArray = pArray; - return rc; -} - /* ** This function retreives the doclist for the specified term (or term ** prefix) from the database. ** ** The returned doclist may be in one of two formats, depending on the @@ -110190,150 +109194,145 @@ ** in the database without the found length specifier at the start of on-disk ** doclists. 
*/ static int fts3TermSelect( Fts3Table *p, /* Virtual table handle */ - Fts3PhraseToken *pTok, /* Token to query for */ int iColumn, /* Column to query (or -ve for all columns) */ + const char *zTerm, /* Term to query for */ + int nTerm, /* Size of zTerm in bytes */ + int isPrefix, /* True for a prefix search */ int isReqPos, /* True to include position lists in output */ int *pnOut, /* OUT: Size of buffer at *ppOut */ char **ppOut /* OUT: Malloced result buffer */ ){ + int i; + TermSelect tsc; + Fts3SegFilter filter; /* Segment term filter configuration */ + Fts3SegReader **apSegment; /* Array of segments to read data from */ + int nSegment = 0; /* Size of apSegment array */ + int nAlloc = 16; /* Allocated size of segment array */ int rc; /* Return code */ - Fts3SegReaderArray *pArray; /* Seg-reader array for this term */ - TermSelect tsc; /* Context object for fts3TermSelectCb() */ - Fts3SegFilter filter; /* Segment term filter configuration */ + sqlite3_stmt *pStmt = 0; /* SQL statement to scan %_segdir table */ + int iAge = 0; /* Used to assign ages to segments */ - pArray = pTok->pArray; + apSegment = (Fts3SegReader **)sqlite3_malloc(sizeof(Fts3SegReader*)*nAlloc); + if( !apSegment ) return SQLITE_NOMEM; + rc = sqlite3Fts3SegReaderPending(p, zTerm, nTerm, isPrefix, &apSegment[0]); + if( rc!=SQLITE_OK ) goto finished; + if( apSegment[0] ){ + nSegment = 1; + } + + /* Loop through the entire %_segdir table. For each segment, create a + ** Fts3SegReader to iterate through the subset of the segment leaves + ** that may contain a term that matches zTerm/nTerm. For non-prefix + ** searches, this is always a single leaf. For prefix searches, this + ** may be a contiguous block of leaves. + ** + ** The code in this loop does not actually load any leaves into memory + ** (unless the root node happens to be a leaf). It simply examines the + ** b-tree structure to determine which leaves need to be inspected. + */ + rc = sqlite3Fts3AllSegdirs(p, &pStmt); + while( rc==SQLITE_OK && SQLITE_ROW==(rc = sqlite3_step(pStmt)) ){ + Fts3SegReader *pNew = 0; + int nRoot = sqlite3_column_bytes(pStmt, 4); + char const *zRoot = sqlite3_column_blob(pStmt, 4); + if( sqlite3_column_int64(pStmt, 1)==0 ){ + /* The entire segment is stored on the root node (which must be a + ** leaf). Do not bother inspecting any data in this case, just + ** create a Fts3SegReader to scan the single leaf. + */ + rc = sqlite3Fts3SegReaderNew(p, iAge, 0, 0, 0, zRoot, nRoot, &pNew); + }else{ + int rc2; /* Return value of sqlite3Fts3ReadBlock() */ + sqlite3_int64 i1; /* Blockid of leaf that may contain zTerm */ + rc = fts3SelectLeaf(p, zTerm, nTerm, zRoot, nRoot, &i1); + if( rc==SQLITE_OK ){ + sqlite3_int64 i2 = sqlite3_column_int64(pStmt, 2); + rc = sqlite3Fts3SegReaderNew(p, iAge, i1, i2, 0, 0, 0, &pNew); + } + + /* The following call to ReadBlock() serves to reset the SQL statement + ** used to retrieve blocks of data from the %_segments table. If it is + ** not reset here, then it may remain classified as an active statement + ** by SQLite, which may lead to "DROP TABLE" or "DETACH" commands + ** failing. + */ + rc2 = sqlite3Fts3ReadBlock(p, 0, 0, 0); + if( rc==SQLITE_OK ){ + rc = rc2; + } + } + iAge++; + + /* If a new Fts3SegReader was allocated, add it to the apSegment array. 
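/* [Editorial sketch -- not part of this diff or of the SQLite sources.
** The code that follows gathers an unknown number of segment readers into
** the apSegment[] array, growing it with sqlite3_realloc() in steps of 16
** entries so that most appends do not allocate. The standalone sketch below
** shows the same append pattern with hypothetical names and the standard
** allocator. */
#include <stdlib.h>

/* Append pElem to an array of pointers, growing the array in chunks of 16.
** *paArray may start out NULL with *pnAlloc==0. Returns 0 on success, or -1
** if the reallocation fails (the original array is left untouched). */
static int sketchAppendChunked(void ***paArray, int *pnUsed, int *pnAlloc,
                               void *pElem){
  if( *pnUsed==*pnAlloc ){
    int nNew = *pnAlloc + 16;
    void **aNew = (void **)realloc(*paArray, nNew*sizeof(void*));
    if( aNew==0 ) return -1;
    *paArray = aNew;
    *pnAlloc = nNew;
  }
  (*paArray)[(*pnUsed)++] = pElem;
  return 0;
}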
*/ + assert( pNew!=0 || rc!=SQLITE_OK ); + if( pNew ){ + if( nSegment==nAlloc ){ + Fts3SegReader **pArray; + nAlloc += 16; + pArray = (Fts3SegReader **)sqlite3_realloc( + apSegment, nAlloc*sizeof(Fts3SegReader *) + ); + if( !pArray ){ + sqlite3Fts3SegReaderFree(p, pNew); + rc = SQLITE_NOMEM; + goto finished; + } + apSegment = pArray; + } + apSegment[nSegment++] = pNew; + } + } + if( rc!=SQLITE_DONE ){ + assert( rc!=SQLITE_OK ); + goto finished; + } + memset(&tsc, 0, sizeof(TermSelect)); tsc.isReqPos = isReqPos; filter.flags = FTS3_SEGMENT_IGNORE_EMPTY - | (pTok->isPrefix ? FTS3_SEGMENT_PREFIX : 0) + | (isPrefix ? FTS3_SEGMENT_PREFIX : 0) | (isReqPos ? FTS3_SEGMENT_REQUIRE_POS : 0) | (iColumnnColumn ? FTS3_SEGMENT_COLUMN_FILTER : 0); filter.iCol = iColumn; - filter.zTerm = pTok->z; - filter.nTerm = pTok->n; + filter.zTerm = zTerm; + filter.nTerm = nTerm; - rc = sqlite3Fts3SegReaderIterate(p, pArray->apSegment, pArray->nSegment, - &filter, fts3TermSelectCb, (void *)&tsc + rc = sqlite3Fts3SegReaderIterate(p, apSegment, nSegment, &filter, + fts3TermSelectCb, (void *)&tsc ); if( rc==SQLITE_OK ){ rc = fts3TermSelectMerge(&tsc); } if( rc==SQLITE_OK ){ *ppOut = tsc.aaOutput[0]; *pnOut = tsc.anOutput[0]; }else{ - int i; for(i=0; ipArray = 0; - return rc; -} - -/* -** This function counts the total number of docids in the doclist stored -** in buffer aList[], size nList bytes. -** -** If the isPoslist argument is true, then it is assumed that the doclist -** contains a position-list following each docid. Otherwise, it is assumed -** that the doclist is simply a list of docids stored as delta encoded -** varints. -*/ -static int fts3DoclistCountDocids(int isPoslist, char *aList, int nList){ - int nDoc = 0; /* Return value */ - if( aList ){ - char *aEnd = &aList[nList]; /* Pointer to one byte after EOF */ - char *p = aList; /* Cursor */ - if( !isPoslist ){ - /* The number of docids in the list is the same as the number of - ** varints. In FTS3 a varint consists of a single byte with the 0x80 - ** bit cleared and zero or more bytes with the 0x80 bit set. So to - ** count the varints in the buffer, just count the number of bytes - ** with the 0x80 bit clear. */ - while( ppLeft); - if( rc==SQLITE_OK ){ - rc = fts3DeferExpression(pCsr, pExpr->pRight); - } - if( pExpr->eType==FTSQUERY_PHRASE ){ - int iCol = pExpr->pPhrase->iColumn; - int i; - for(i=0; rc==SQLITE_OK && ipPhrase->nToken; i++){ - Fts3PhraseToken *pToken = &pExpr->pPhrase->aToken[i]; - if( pToken->pDeferred==0 ){ - rc = sqlite3Fts3DeferToken(pCsr, pToken, iCol); - } - } - } - } - return rc; -} - -/* -** This function removes the position information from a doclist. When -** called, buffer aList (size *pnList bytes) contains a doclist that includes -** position information. This function removes the position information so -** that aList contains only docids, and adjusts *pnList to reflect the new -** (possibly reduced) size of the doclist. -*/ -static void fts3DoclistStripPositions( - char *aList, /* IN/OUT: Buffer containing doclist */ - int *pnList /* IN/OUT: Size of doclist in bytes */ -){ - if( aList ){ - char *aEnd = &aList[*pnList]; /* Pointer to one byte after EOF */ - char *p = aList; /* Input cursor */ - char *pOut = aList; /* Output cursor */ - - while( piColumn; int isTermPos = (pPhrase->nToken>1 || isReqPos); - Fts3Table *p = (Fts3Table *)pCsr->base.pVtab; - int isFirst = 1; - - int iPrevTok = 0; - int nDoc = 0; - - /* If this is an xFilter() evaluation, create a segment-reader for each - ** phrase token. 
Or, if this is an xNext() or snippet/offsets/matchinfo - ** evaluation, only create segment-readers if there are no Fts3DeferredToken - ** objects attached to the phrase-tokens. - */ - for(ii=0; iinToken; ii++){ - Fts3PhraseToken *pTok = &pPhrase->aToken[ii]; - if( pTok->pArray==0 ){ - if( (pCsr->eEvalmode==FTS3_EVAL_FILTER) - || (pCsr->eEvalmode==FTS3_EVAL_NEXT && pCsr->pDeferred==0) - || (pCsr->eEvalmode==FTS3_EVAL_MATCHINFO && pTok->bFulltext) - ){ - rc = fts3TermSegReaderArray( - pCsr, pTok->z, pTok->n, pTok->isPrefix, &pTok->pArray - ); - if( rc!=SQLITE_OK ) return rc; - } - } - } - - for(ii=0; iinToken; ii++){ - Fts3PhraseToken *pTok; /* Token to find doclist for */ - int iTok; /* The token being queried this iteration */ + + for(ii=0; iinToken; ii++){ + struct PhraseToken *pTok = &pPhrase->aToken[ii]; + char *z = pTok->z; /* Next token of the phrase */ + int n = pTok->n; /* Size of z in bytes */ + int isPrefix = pTok->isPrefix;/* True if token is a prefix */ char *pList; /* Pointer to token doclist */ int nList; /* Size of buffer at pList */ - /* Select a token to process. If this is an xFilter() call, then tokens - ** are processed in order from least to most costly. Otherwise, tokens - ** are processed in the order in which they occur in the phrase. - */ - if( pCsr->eEvalmode==FTS3_EVAL_MATCHINFO ){ - assert( isReqPos ); - iTok = ii; - pTok = &pPhrase->aToken[iTok]; - if( pTok->bFulltext==0 ) continue; - }else if( pCsr->eEvalmode==FTS3_EVAL_NEXT || isReqPos ){ - iTok = ii; - pTok = &pPhrase->aToken[iTok]; - }else{ - int nMinCost = 0x7FFFFFFF; - int jj; - - /* Find the remaining token with the lowest cost. */ - for(jj=0; jjnToken; jj++){ - Fts3SegReaderArray *pArray = pPhrase->aToken[jj].pArray; - if( pArray && pArray->nCostnCost; - } - } - pTok = &pPhrase->aToken[iTok]; - - /* This branch is taken if it is determined that loading the doclist - ** for the next token would require more IO than loading all documents - ** currently identified by doclist pOut/nOut. No further doclists will - ** be loaded from the full-text index for this phrase. - */ - if( nMinCost>nDoc && ii>0 ){ - rc = fts3DeferExpression(pCsr, pCsr->pExpr); - break; - } - } - - if( pCsr->eEvalmode==FTS3_EVAL_NEXT && pTok->pDeferred ){ - rc = fts3DeferredTermSelect(pTok->pDeferred, isTermPos, &nList, &pList); - }else{ - assert( pTok->pArray ); - rc = fts3TermSelect(p, pTok, iCol, isTermPos, &nList, &pList); - pTok->bFulltext = 1; - } - assert( rc!=SQLITE_OK || pCsr->eEvalmode || pTok->pArray==0 ); + rc = fts3TermSelect(p, iCol, z, n, isPrefix, isTermPos, &nList, &pList); if( rc!=SQLITE_OK ) break; - if( isFirst ){ + if( ii==0 ){ pOut = pList; nOut = nList; - if( pCsr->eEvalmode==FTS3_EVAL_FILTER && pPhrase->nToken>1 ){ - nDoc = fts3DoclistCountDocids(1, pOut, nOut); - } - isFirst = 0; - iPrevTok = iTok; }else{ - /* Merge the new term list and the current output. */ - char *aLeft, *aRight; - int nLeft, nRight; - int nDist; - int mt; - - /* If this is the final token of the phrase, and positions were not - ** requested by the caller, use MERGE_PHRASE instead of POS_PHRASE. - ** This drops the position information from the output list. + /* Merge the new term list and the current output. If this is the + ** last term in the phrase, and positions are not required in the + ** output of this function, the positions can be dropped as part + ** of this merge. Either way, the result of this merge will be + ** smaller than nList bytes. 
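/* [Editorial sketch -- not part of this diff or of the SQLite sources.
** At the docid level, merging the doclist of each successive phrase token
** into the running output is an intersection of two sorted id lists; the
** real merge additionally checks that token positions are adjacent and
** works on varint-encoded buffers. The hypothetical helper below shows only
** the sorted-intersection step, including the property relied on above:
** the output may reuse one of the inputs, because the write position never
** overtakes the read positions. */
#include <stddef.h>

/* Intersect two ascending id lists into aOut (aOut may alias aLeft).
** Returns the number of ids written. */
static size_t sketchIntersectSorted(const long long *aLeft, size_t nLeft,
                                    const long long *aRight, size_t nRight,
                                    long long *aOut){
  size_t i = 0, j = 0, nOut = 0;
  while( i<nLeft && j<nRight ){
    if( aLeft[i]==aRight[j] ){
      aOut[nOut++] = aLeft[i];
      i++; j++;
    }else if( aLeft[i]<aRight[j] ){
      i++;
    }else{
      j++;
    }
  }
  return nOut;
}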
The code in fts3DoclistMerge() is written + ** so that it is safe to use pList as the output as well as an input + ** in this case. */ - mt = MERGE_POS_PHRASE; - if( ii==pPhrase->nToken-1 && !isReqPos ) mt = MERGE_PHRASE; - - assert( iPrevTok!=iTok ); - if( iPrevToknToken-1 && !isReqPos ){ + mergetype = MERGE_PHRASE; + } + fts3DoclistMerge(mergetype, 0, 0, pList, &nOut, pOut, nOut, pList, nList); + sqlite3_free(pOut); + pOut = pList; } assert( nOut==0 || pOut!=0 ); } if( rc==SQLITE_OK ){ - if( ii!=pPhrase->nToken ){ - assert( pCsr->eEvalmode==FTS3_EVAL_FILTER && isReqPos==0 ); - fts3DoclistStripPositions(pOut, &nOut); - } *paOut = pOut; *pnOut = nOut; }else{ sqlite3_free(pOut); } return rc; } -/* -** This function merges two doclists according to the requirements of a -** NEAR operator. -** -** Both input doclists must include position information. The output doclist -** includes position information if the first argument to this function -** is MERGE_POS_NEAR, or does not if it is MERGE_NEAR. -*/ static int fts3NearMerge( int mergetype, /* MERGE_POS_NEAR or MERGE_NEAR */ int nNear, /* Parameter to NEAR operator */ int nTokenLeft, /* Number of tokens in LHS phrase arg */ char *aLeft, /* Doclist for LHS (incl. positions) */ @@ -110499,21 +109396,21 @@ char *aRight, /* As aLeft */ int nRight, /* As nRight */ char **paOut, /* OUT: Results of merge (malloced) */ int *pnOut /* OUT: Sized of output buffer */ ){ - char *aOut; /* Buffer to write output doclist to */ - int rc; /* Return code */ + char *aOut; + int rc; assert( mergetype==MERGE_POS_NEAR || MERGE_NEAR ); aOut = sqlite3_malloc(nLeft+nRight+1); if( aOut==0 ){ rc = SQLITE_NOMEM; }else{ rc = fts3DoclistMerge(mergetype, nNear+nTokenRight, nNear+nTokenLeft, - aOut, pnOut, aLeft, nLeft, aRight, nRight, 0 + aOut, pnOut, aLeft, nLeft, aRight, nRight ); if( rc!=SQLITE_OK ){ sqlite3_free(aOut); aOut = 0; } @@ -110521,36 +109418,21 @@ *paOut = aOut; return rc; } -/* -** This function is used as part of the processing for the snippet() and -** offsets() functions. -** -** Both pLeft and pRight are expression nodes of type FTSQUERY_PHRASE. Both -** have their respective doclists (including position information) loaded -** in Fts3Expr.aDoclist/nDoclist. This function removes all entries from -** each doclist that are not within nNear tokens of a corresponding entry -** in the other doclist. -*/ SQLITE_PRIVATE int sqlite3Fts3ExprNearTrim(Fts3Expr *pLeft, Fts3Expr *pRight, int nNear){ - int rc; /* Return code */ - - assert( pLeft->eType==FTSQUERY_PHRASE ); - assert( pRight->eType==FTSQUERY_PHRASE ); - assert( pLeft->isLoaded && pRight->isLoaded ); - + int rc; if( pLeft->aDoclist==0 || pRight->aDoclist==0 ){ sqlite3_free(pLeft->aDoclist); sqlite3_free(pRight->aDoclist); pRight->aDoclist = 0; pLeft->aDoclist = 0; rc = SQLITE_OK; }else{ - char *aOut; /* Buffer in which to assemble new doclist */ - int nOut; /* Size of buffer aOut in bytes */ + char *aOut; + int nOut; rc = fts3NearMerge(MERGE_POS_NEAR, nNear, pLeft->pPhrase->nToken, pLeft->aDoclist, pLeft->nDoclist, pRight->pPhrase->nToken, pRight->aDoclist, pRight->nDoclist, &aOut, &nOut @@ -110570,157 +109452,18 @@ pLeft->nDoclist = nOut; } return rc; } - -/* -** Allocate an Fts3SegReaderArray for each token in the expression pExpr. -** The allocated objects are stored in the Fts3PhraseToken.pArray member -** variables of each token structure. 
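/* [Editorial sketch -- not part of this diff or of the SQLite sources.
** The fts3NearMerge()/sqlite3Fts3ExprNearTrim() routines earlier in this
** hunk keep a docid only if the left and right phrases occur close enough
** together; loosely, that is why the nNear parameter is widened by the
** length of the opposite phrase in the calls above. The hypothetical helper
** below shows the underlying test on two sorted position lists. */
#include <stddef.h>

/* Return 1 if some position in aLeft[] lies within nWindow tokens of some
** position in aRight[] (in either direction), else 0. Both arrays are
** assumed to be sorted ascending. */
static int sketchNearMatch(const int *aLeft, size_t nLeft,
                           const int *aRight, size_t nRight, int nWindow){
  size_t i = 0, j = 0;
  while( i<nLeft && j<nRight ){
    int d = aLeft[i] - aRight[j];
    if( d<0 ) d = -d;
    if( d<=nWindow ) return 1;
    if( aLeft[i]<aRight[j] ){
      i++;            /* advance the smaller position to shrink the gap */
    }else{
      j++;
    }
  }
  return 0;
}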
-*/ -static int fts3ExprAllocateSegReaders( - Fts3Cursor *pCsr, /* FTS3 table */ - Fts3Expr *pExpr, /* Expression to create seg-readers for */ - int *pnExpr /* OUT: Number of AND'd expressions */ -){ - int rc = SQLITE_OK; /* Return code */ - - assert( pCsr->eEvalmode==FTS3_EVAL_FILTER ); - if( pnExpr && pExpr->eType!=FTSQUERY_AND ){ - (*pnExpr)++; - pnExpr = 0; - } - - if( pExpr->eType==FTSQUERY_PHRASE ){ - Fts3Phrase *pPhrase = pExpr->pPhrase; - int ii; - - for(ii=0; rc==SQLITE_OK && iinToken; ii++){ - Fts3PhraseToken *pTok = &pPhrase->aToken[ii]; - if( pTok->pArray==0 ){ - rc = fts3TermSegReaderArray( - pCsr, pTok->z, pTok->n, pTok->isPrefix, &pTok->pArray - ); - } - } - }else{ - rc = fts3ExprAllocateSegReaders(pCsr, pExpr->pLeft, pnExpr); - if( rc==SQLITE_OK ){ - rc = fts3ExprAllocateSegReaders(pCsr, pExpr->pRight, pnExpr); - } - } - return rc; -} - -/* -** Free the Fts3SegReaderArray objects associated with each token in the -** expression pExpr. In other words, this function frees the resources -** allocated by fts3ExprAllocateSegReaders(). -*/ -static void fts3ExprFreeSegReaders(Fts3Expr *pExpr){ - if( pExpr ){ - Fts3Phrase *pPhrase = pExpr->pPhrase; - if( pPhrase ){ - int kk; - for(kk=0; kknToken; kk++){ - fts3SegReaderArrayFree(pPhrase->aToken[kk].pArray); - pPhrase->aToken[kk].pArray = 0; - } - } - fts3ExprFreeSegReaders(pExpr->pLeft); - fts3ExprFreeSegReaders(pExpr->pRight); - } -} - -/* -** Return the sum of the costs of all tokens in the expression pExpr. This -** function must be called after Fts3SegReaderArrays have been allocated -** for all tokens using fts3ExprAllocateSegReaders(). -*/ -int fts3ExprCost(Fts3Expr *pExpr){ - int nCost; /* Return value */ - if( pExpr->eType==FTSQUERY_PHRASE ){ - Fts3Phrase *pPhrase = pExpr->pPhrase; - int ii; - nCost = 0; - for(ii=0; iinToken; ii++){ - nCost += pPhrase->aToken[ii].pArray->nCost; - } - }else{ - nCost = fts3ExprCost(pExpr->pLeft) + fts3ExprCost(pExpr->pRight); - } - return nCost; -} - -/* -** The following is a helper function (and type) for fts3EvalExpr(). It -** must be called after Fts3SegReaders have been allocated for every token -** in the expression. See the context it is called from in fts3EvalExpr() -** for further explanation. -*/ -typedef struct ExprAndCost ExprAndCost; -struct ExprAndCost { - Fts3Expr *pExpr; - int nCost; -}; -static void fts3ExprAssignCosts( - Fts3Expr *pExpr, /* Expression to create seg-readers for */ - ExprAndCost **ppExprCost /* OUT: Write to *ppExprCost */ -){ - if( pExpr->eType==FTSQUERY_AND ){ - fts3ExprAssignCosts(pExpr->pLeft, ppExprCost); - fts3ExprAssignCosts(pExpr->pRight, ppExprCost); - }else{ - (*ppExprCost)->pExpr = pExpr; - (*ppExprCost)->nCost = fts3ExprCost(pExpr);; - (*ppExprCost)++; - } -} - -/* -** Evaluate the full-text expression pExpr against FTS3 table pTab. Store -** the resulting doclist in *paOut and *pnOut. This routine mallocs for -** the space needed to store the output. The caller is responsible for +/* +** Evaluate the full-text expression pExpr against fts3 table pTab. Store +** the resulting doclist in *paOut and *pnOut. This routine mallocs for +** the space needed to store the output. The caller is responsible for ** freeing the space when it has finished. -** -** This function is called in two distinct contexts: -** -** * From within the virtual table xFilter() method. In this case, the -** output doclist contains entries for all rows in the table, based on -** data read from the full-text index. 
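/* [Editorial sketch -- not part of this diff or of the SQLite sources.
** The fts3ExprCost()/ExprAndCost machinery removed above orders the
** AND-connected subexpressions by estimated cost, so that the cheapest
** doclist is loaded first and a very common (expensive) token can be
** deferred once the candidate row set is already small. The hypothetical
** helper below shows the selection step: repeatedly pick the unevaluated
** entry with the lowest cost. */

struct SketchExprCost {
  int nCost;          /* Estimated cost of evaluating this subexpression */
  int bDone;          /* True once the subexpression has been evaluated */
};

/* Return the index of the cheapest entry not yet evaluated, or -1 if every
** entry has already been processed. */
static int sketchPickCheapest(const struct SketchExprCost *aExpr, int nExpr){
  int i, iBest = -1;
  for(i=0; i<nExpr; i++){
    if( aExpr[i].bDone==0
     && (iBest<0 || aExpr[i].nCost<aExpr[iBest].nCost)
    ){
      iBest = i;
    }
  }
  return iBest;
}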
-** -** In this case, if the query expression contains one or more tokens that -** are very common, then the returned doclist may contain a superset of -** the documents that actually match the expression. -** -** * From within the virtual table xNext() method. This call is only made -** if the call from within xFilter() found that there were very common -** tokens in the query expression and did return a superset of the -** matching documents. In this case the returned doclist contains only -** entries that correspond to the current row of the table. Instead of -** reading the data for each token from the full-text index, the data is -** already available in-memory in the Fts3PhraseToken.pDeferred structures. -** See fts3EvalDeferred() for how it gets there. -** -** In the first case above, Fts3Cursor.doDeferred==0. In the second (if it is -** required) Fts3Cursor.doDeferred==1. -** -** If the SQLite invokes the snippet(), offsets() or matchinfo() function -** as part of a SELECT on an FTS3 table, this function is called on each -** individual phrase expression in the query. If there were very common tokens -** found in the xFilter() call, then this function is called once for phrase -** for each row visited, and the returned doclist contains entries for the -** current row only. Otherwise, if there were no very common tokens, then this -** function is called once only for each phrase in the query and the returned -** doclist contains entries for all rows of the table. -** -** Fts3Cursor.doDeferred==1 when this function is called on phrases as a -** result of a snippet(), offsets() or matchinfo() invocation. */ -static int fts3EvalExpr( - Fts3Cursor *p, /* Virtual table cursor handle */ +static int evalFts3Expr( + Fts3Table *p, /* Virtual table handle */ Fts3Expr *pExpr, /* Parsed fts3 expression */ char **paOut, /* OUT: Pointer to malloc'd result buffer */ int *pnOut, /* OUT: Size of buffer at *paOut */ int isReqPos /* Require positions in output buffer */ ){ @@ -110729,101 +109472,37 @@ /* Zero the output parameters. 
*/ *paOut = 0; *pnOut = 0; if( pExpr ){ - assert( pExpr->eType==FTSQUERY_NEAR || pExpr->eType==FTSQUERY_OR - || pExpr->eType==FTSQUERY_AND || pExpr->eType==FTSQUERY_NOT - || pExpr->eType==FTSQUERY_PHRASE + assert( pExpr->eType==FTSQUERY_PHRASE + || pExpr->eType==FTSQUERY_NEAR + || isReqPos==0 ); - assert( pExpr->eType==FTSQUERY_PHRASE || isReqPos==0 ); - if( pExpr->eType==FTSQUERY_PHRASE ){ - rc = fts3PhraseSelect(p, pExpr->pPhrase, + rc = fts3PhraseSelect(p, pExpr->pPhrase, isReqPos || (pExpr->pParent && pExpr->pParent->eType==FTSQUERY_NEAR), paOut, pnOut ); - fts3ExprFreeSegReaders(pExpr); - }else if( p->eEvalmode==FTS3_EVAL_FILTER && pExpr->eType==FTSQUERY_AND ){ - ExprAndCost *aExpr = 0; /* Array of AND'd expressions and costs */ - int nExpr = 0; /* Size of aExpr[] */ - char *aRet = 0; /* Doclist to return to caller */ - int nRet = 0; /* Length of aRet[] in bytes */ - int nDoc = 0x7FFFFFFF; - - assert( !isReqPos ); - - rc = fts3ExprAllocateSegReaders(p, pExpr, &nExpr); - if( rc==SQLITE_OK ){ - assert( nExpr>1 ); - aExpr = sqlite3_malloc(sizeof(ExprAndCost) * nExpr); - if( !aExpr ) rc = SQLITE_NOMEM; - } - if( rc==SQLITE_OK ){ - int ii; /* Used to iterate through expressions */ - - fts3ExprAssignCosts(pExpr, &aExpr); - aExpr -= nExpr; - for(ii=0; iipExpr && (pBest==0 || pCand->nCostnCost) ){ - pBest = pCand; - } - } - - if( pBest->nCost>nDoc ){ - rc = fts3DeferExpression(p, p->pExpr); - break; - }else{ - rc = fts3EvalExpr(p, pBest->pExpr, &aNew, &nNew, 0); - if( rc!=SQLITE_OK ) break; - pBest->pExpr = 0; - if( ii==0 ){ - aRet = aNew; - nRet = nNew; - nDoc = fts3DoclistCountDocids(0, aRet, nRet); - }else{ - fts3DoclistMerge( - MERGE_AND, 0, 0, aRet, &nRet, aRet, nRet, aNew, nNew, &nDoc - ); - sqlite3_free(aNew); - } - } - } - } - - *paOut = aRet; - *pnOut = nRet; - sqlite3_free(aExpr); - fts3ExprFreeSegReaders(pExpr); - }else{ char *aLeft; char *aRight; int nLeft; int nRight; - assert( pExpr->eType==FTSQUERY_NEAR - || pExpr->eType==FTSQUERY_OR - || pExpr->eType==FTSQUERY_NOT - || (pExpr->eType==FTSQUERY_AND && p->eEvalmode==FTS3_EVAL_NEXT) - ); - - if( 0==(rc = fts3EvalExpr(p, pExpr->pRight, &aRight, &nRight, isReqPos)) - && 0==(rc = fts3EvalExpr(p, pExpr->pLeft, &aLeft, &nLeft, isReqPos)) + if( 0==(rc = evalFts3Expr(p, pExpr->pRight, &aRight, &nRight, isReqPos)) + && 0==(rc = evalFts3Expr(p, pExpr->pLeft, &aLeft, &nLeft, isReqPos)) ){ + assert( pExpr->eType==FTSQUERY_NEAR || pExpr->eType==FTSQUERY_OR + || pExpr->eType==FTSQUERY_AND || pExpr->eType==FTSQUERY_NOT + ); switch( pExpr->eType ){ case FTSQUERY_NEAR: { Fts3Expr *pLeft; Fts3Expr *pRight; - int mergetype = MERGE_NEAR; + int mergetype = isReqPos ? MERGE_POS_NEAR : MERGE_NEAR; + if( pExpr->pParent && pExpr->pParent->eType==FTSQUERY_NEAR ){ mergetype = MERGE_POS_NEAR; } pLeft = pExpr->pLeft; while( pLeft->eType==FTSQUERY_NEAR ){ @@ -110848,21 +109527,21 @@ ** so that a buffer of zero bytes is never allocated - this can ** cause fts3DoclistMerge() to incorrectly return SQLITE_NOMEM. 
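/* [Editorial sketch -- not part of this diff or of the SQLite sources.
** The "+1" in the nRight+nLeft+1 allocation just below guards against a
** zero-byte request: sqlite3_malloc() is documented to return NULL for a
** zero-byte request (and plain malloc(0) may do the same), which the caller
** would misread as an out-of-memory error. Rounding the request up by one
** byte keeps a NULL return unambiguous, as the hypothetical wrapper below
** illustrates. */
#include <stdlib.h>

/* Allocate at least one byte so that a NULL return always means failure. */
static void *sketchMallocNonZero(size_t nByte){
  return malloc(nByte>0 ? nByte : 1);
}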
*/ char *aBuffer = sqlite3_malloc(nRight+nLeft+1); rc = fts3DoclistMerge(MERGE_OR, 0, 0, aBuffer, pnOut, - aLeft, nLeft, aRight, nRight, 0 + aLeft, nLeft, aRight, nRight ); *paOut = aBuffer; sqlite3_free(aLeft); break; } default: { assert( FTSQUERY_NOT==MERGE_NOT && FTSQUERY_AND==MERGE_AND ); fts3DoclistMerge(pExpr->eType, 0, 0, aLeft, pnOut, - aLeft, nLeft, aRight, nRight, 0 + aLeft, nLeft, aRight, nRight ); *paOut = aLeft; break; } } @@ -110869,92 +109548,10 @@ } sqlite3_free(aRight); } } - return rc; -} - -/* -** This function is called from within xNext() for each row visited by -** an FTS3 query. If evaluating the FTS3 query expression within xFilter() -** was able to determine the exact set of matching rows, this function sets -** *pbRes to true and returns SQLITE_IO immediately. -** -** Otherwise, if evaluating the query expression within xFilter() returned a -** superset of the matching documents instead of an exact set (this happens -** when the query includes very common tokens and it is deemed too expensive to -** load their doclists from disk), this function tests if the current row -** really does match the FTS3 query. -** -** If an error occurs, an SQLite error code is returned. Otherwise, SQLITE_OK -** is returned and *pbRes is set to true if the current row matches the -** FTS3 query (and should be included in the results returned to SQLite), or -** false otherwise. -*/ -static int fts3EvalDeferred( - Fts3Cursor *pCsr, /* FTS3 cursor pointing at row to test */ - int *pbRes /* OUT: Set to true if row is a match */ -){ - int rc = SQLITE_OK; - if( pCsr->pDeferred==0 ){ - *pbRes = 1; - }else{ - rc = fts3CursorSeek(0, pCsr); - if( rc==SQLITE_OK ){ - sqlite3Fts3FreeDeferredDoclists(pCsr); - rc = sqlite3Fts3CacheDeferredDoclists(pCsr); - } - if( rc==SQLITE_OK ){ - char *a = 0; - int n = 0; - rc = fts3EvalExpr(pCsr, pCsr->pExpr, &a, &n, 0); - assert( n>=0 ); - *pbRes = (n>0); - sqlite3_free(a); - } - } - return rc; -} - -/* -** Advance the cursor to the next row in the %_content table that -** matches the search criteria. For a MATCH search, this will be -** the next row that matches. For a full-table scan, this will be -** simply the next row in the %_content table. For a docid lookup, -** this routine simply sets the EOF flag. -** -** Return SQLITE_OK if nothing goes wrong. SQLITE_OK is returned -** even if we reach end-of-file. The fts3EofMethod() will be called -** subsequently to determine whether or not an EOF was hit. -*/ -static int fts3NextMethod(sqlite3_vtab_cursor *pCursor){ - int res; - int rc = SQLITE_OK; /* Return code */ - Fts3Cursor *pCsr = (Fts3Cursor *)pCursor; - - pCsr->eEvalmode = FTS3_EVAL_NEXT; - do { - if( pCsr->aDoclist==0 ){ - if( SQLITE_ROW!=sqlite3_step(pCsr->pStmt) ){ - pCsr->isEof = 1; - rc = sqlite3_reset(pCsr->pStmt); - break; - } - pCsr->iPrevId = sqlite3_column_int64(pCsr->pStmt, 0); - }else{ - if( pCsr->pNextId>=&pCsr->aDoclist[pCsr->nDoclist] ){ - pCsr->isEof = 1; - break; - } - sqlite3_reset(pCsr->pStmt); - fts3GetDeltaVarint(&pCsr->pNextId, &pCsr->iPrevId); - pCsr->isRequireSeek = 1; - pCsr->isMatchinfoNeeded = 1; - } - }while( SQLITE_OK==(rc = fts3EvalDeferred(pCsr, &res)) && res==0 ); - return rc; } /* ** This is the xFilter interface for the virtual table. See @@ -110969,10 +109566,15 @@ ** ** If idxNum>=FTS3_FULLTEXT_SEARCH then use the full text index. The ** column on the left-hand side of the MATCH operator is column ** number idxNum-FTS3_FULLTEXT_SEARCH, 0 indexed. argv[0] is the right-hand ** side of the MATCH operator. 
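/* [Editorial sketch -- not part of this diff or of the SQLite sources.
** The idxNum contract described above packs both the query strategy and the
** MATCH column into a single integer chosen by xBestIndex(). The standalone
** sketch below decodes such a value; the constant values are assumptions
** made for illustration and simply mirror the scheme the comment describes. */
#define SKETCH_FULLSCAN_SEARCH  0     /* full-table scan */
#define SKETCH_DOCID_SEARCH     1     /* lookup by docid/rowid */
#define SKETCH_FULLTEXT_SEARCH  2     /* full-text search on column 0 */

enum SketchStrategy { SKETCH_FULLSCAN, SKETCH_DOCID, SKETCH_FULLTEXT };

/* Decode idxNum into a strategy and, for a full-text search, the 0-indexed
** column on the left-hand side of the MATCH operator. */
static enum SketchStrategy sketchDecodeIdxNum(int idxNum, int *piCol){
  *piCol = -1;
  if( idxNum==SKETCH_FULLSCAN_SEARCH ) return SKETCH_FULLSCAN;
  if( idxNum==SKETCH_DOCID_SEARCH ) return SKETCH_DOCID;
  *piCol = idxNum - SKETCH_FULLTEXT_SEARCH;
  return SKETCH_FULLTEXT;
}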
+*/ +/* TODO(shess) Upgrade the cursor initialization and destruction to +** account for fts3FilterMethod() being called multiple times on the +** same cursor. The current solution is very fragile. Apply fix to +** fts3 as appropriate. */ static int fts3FilterMethod( sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */ int idxNum, /* Strategy index */ const char *idxStr, /* Unused */ @@ -110992,19 +109594,35 @@ UNUSED_PARAMETER(nVal); assert( idxNum>=0 && idxNum<=(FTS3_FULLTEXT_SEARCH+p->nColumn) ); assert( nVal==0 || nVal==1 ); assert( (nVal==0)==(idxNum==FTS3_FULLSCAN_SEARCH) ); - assert( p->pSegments==0 ); /* In case the cursor has been used before, clear it now. */ sqlite3_finalize(pCsr->pStmt); sqlite3_free(pCsr->aDoclist); sqlite3Fts3ExprFree(pCsr->pExpr); memset(&pCursor[1], 0, sizeof(Fts3Cursor)-sizeof(sqlite3_vtab_cursor)); - if( idxNum!=FTS3_DOCID_SEARCH && idxNum!=FTS3_FULLSCAN_SEARCH ){ + /* Compile a SELECT statement for this cursor. For a full-table-scan, the + ** statement loops through all rows of the %_content table. For a + ** full-text query or docid lookup, the statement retrieves a single + ** row by docid. + */ + zSql = sqlite3_mprintf(azSql[idxNum==FTS3_FULLSCAN_SEARCH], p->zDb, p->zName); + if( !zSql ){ + rc = SQLITE_NOMEM; + }else{ + rc = sqlite3_prepare_v2(p->db, zSql, -1, &pCsr->pStmt, 0); + sqlite3_free(zSql); + } + if( rc!=SQLITE_OK ) return rc; + pCsr->eSearch = (i16)idxNum; + + if( idxNum==FTS3_DOCID_SEARCH ){ + rc = sqlite3_bind_value(pCsr->pStmt, 1, apVal[0]); + }else if( idxNum!=FTS3_FULLSCAN_SEARCH ){ int iCol = idxNum-FTS3_FULLTEXT_SEARCH; const char *zQuery = (const char *)sqlite3_value_text(apVal[0]); if( zQuery==0 && sqlite3_value_type(apVal[0])!=SQLITE_NULL ){ return SQLITE_NOMEM; @@ -111022,34 +109640,15 @@ } rc = sqlite3Fts3ReadLock(p); if( rc!=SQLITE_OK ) return rc; - rc = fts3EvalExpr(pCsr, pCsr->pExpr, &pCsr->aDoclist, &pCsr->nDoclist, 0); - sqlite3Fts3SegmentsClose(p); - if( rc!=SQLITE_OK ) return rc; + rc = evalFts3Expr(p, pCsr->pExpr, &pCsr->aDoclist, &pCsr->nDoclist, 0); pCsr->pNextId = pCsr->aDoclist; pCsr->iPrevId = 0; } - /* Compile a SELECT statement for this cursor. For a full-table-scan, the - ** statement loops through all rows of the %_content table. For a - ** full-text query or docid lookup, the statement retrieves a single - ** row by docid. - */ - zSql = sqlite3_mprintf(azSql[idxNum==FTS3_FULLSCAN_SEARCH], p->zDb, p->zName); - if( !zSql ){ - rc = SQLITE_NOMEM; - }else{ - rc = sqlite3_prepare_v2(p->db, zSql, -1, &pCsr->pStmt, 0); - sqlite3_free(zSql); - } - if( rc==SQLITE_OK && idxNum==FTS3_DOCID_SEARCH ){ - rc = sqlite3_bind_value(pCsr->pStmt, 1, apVal[0]); - } - pCsr->eSearch = (i16)idxNum; - if( rc!=SQLITE_OK ) return rc; return fts3NextMethod(pCursor); } /* @@ -111069,15 +109668,10 @@ static int fts3RowidMethod(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ Fts3Cursor *pCsr = (Fts3Cursor *) pCursor; if( pCsr->aDoclist ){ *pRowid = pCsr->iPrevId; }else{ - /* This branch runs if the query is implemented using a full-table scan - ** (not using the full-text index). In this case grab the rowid from the - ** SELECT statement. - */ - assert( pCsr->isRequireSeek==0 ); *pRowid = sqlite3_column_int64(pCsr->pStmt, 0); } return SQLITE_OK; } @@ -111136,13 +109730,11 @@ /* ** Implementation of xSync() method. Flush the contents of the pending-terms ** hash-table to the database. 
*/ static int fts3SyncMethod(sqlite3_vtab *pVtab){ - int rc = sqlite3Fts3PendingTermsFlush((Fts3Table *)pVtab); - sqlite3Fts3SegmentsClose((Fts3Table *)pVtab); - return rc; + return sqlite3Fts3PendingTermsFlush((Fts3Table *)pVtab); } /* ** Implementation of xBegin() method. This is a no-op. */ @@ -111176,31 +109768,12 @@ ** Load the doclist associated with expression pExpr to pExpr->aDoclist. ** The loaded doclist contains positions as well as the document ids. ** This is used by the matchinfo(), snippet() and offsets() auxillary ** functions. */ -SQLITE_PRIVATE int sqlite3Fts3ExprLoadDoclist(Fts3Cursor *pCsr, Fts3Expr *pExpr){ - int rc; - assert( pExpr->eType==FTSQUERY_PHRASE && pExpr->pPhrase ); - assert( pCsr->eEvalmode==FTS3_EVAL_NEXT ); - rc = fts3EvalExpr(pCsr, pExpr, &pExpr->aDoclist, &pExpr->nDoclist, 1); - return rc; -} - -SQLITE_PRIVATE int sqlite3Fts3ExprLoadFtDoclist( - Fts3Cursor *pCsr, - Fts3Expr *pExpr, - char **paDoclist, - int *pnDoclist -){ - int rc; - assert( pCsr->eEvalmode==FTS3_EVAL_NEXT ); - assert( pExpr->eType==FTSQUERY_PHRASE && pExpr->pPhrase ); - pCsr->eEvalmode = FTS3_EVAL_MATCHINFO; - rc = fts3EvalExpr(pCsr, pExpr, paDoclist, pnDoclist, 1); - pCsr->eEvalmode = FTS3_EVAL_NEXT; - return rc; +SQLITE_PRIVATE int sqlite3Fts3ExprLoadDoclist(Fts3Table *pTab, Fts3Expr *pExpr){ + return evalFts3Expr(pTab, pExpr, &pExpr->aDoclist, &pExpr->nDoclist, 1); } /* ** After ExprLoadDoclist() (see above) has been called, this function is ** used to iterate/search through the position lists that make up the doclist @@ -111262,11 +109835,11 @@ */ static int fts3FunctionArg( sqlite3_context *pContext, /* SQL function call context */ const char *zFunc, /* Function name */ sqlite3_value *pVal, /* argv[0] passed to function */ - Fts3Cursor **ppCsr /* OUT: Store cursor handle here */ + Fts3Cursor **ppCsr /* OUT: Store cursor handle here */ ){ Fts3Cursor *pRet; if( sqlite3_value_type(pVal)!=SQLITE_BLOB || sqlite3_value_bytes(pVal)!=sizeof(Fts3Cursor *) ){ @@ -111388,11 +109961,17 @@ sqlite3_context *pContext, /* SQLite function call context */ int nVal, /* Size of argument array */ sqlite3_value **apVal /* Array of arguments */ ){ Fts3Cursor *pCsr; /* Cursor handle passed through apVal[0] */ - assert( nVal==1 ); + + if( nVal!=1 ){ + sqlite3_result_error(pContext, + "wrong number of arguments to function matchinfo()", -1); + return; + } + if( SQLITE_OK==fts3FunctionArg(pContext, "matchinfo", apVal[0], &pCsr) ){ sqlite3Fts3Matchinfo(pContext, pCsr); } } @@ -111451,17 +110030,16 @@ fts3DbExec(&rc, db, "ALTER TABLE %Q.'%q_content' RENAME TO '%q_content';", p->zDb, p->zName, zName ); + if( rc==SQLITE_ERROR ) rc = SQLITE_OK; if( p->bHasDocsize ){ fts3DbExec(&rc, db, "ALTER TABLE %Q.'%q_docsize' RENAME TO '%q_docsize';", p->zDb, p->zName, zName ); - } - if( p->bHasStat ){ fts3DbExec(&rc, db, "ALTER TABLE %Q.'%q_stat' RENAME TO '%q_stat';", p->zDb, p->zName, zName ); } @@ -111482,11 +110060,11 @@ /* xConnect */ fts3ConnectMethod, /* xBestIndex */ fts3BestIndexMethod, /* xDisconnect */ fts3DisconnectMethod, /* xDestroy */ fts3DestroyMethod, /* xOpen */ fts3OpenMethod, - /* xClose */ fts3CloseMethod, + /* xClose */ fulltextClose, /* xFilter */ fts3FilterMethod, /* xNext */ fts3NextMethod, /* xEof */ fts3EofMethod, /* xColumn */ fts3ColumnMethod, /* xRowid */ fts3RowidMethod, @@ -111509,24 +110087,23 @@ sqlite3Fts3HashClear(pHash); sqlite3_free(pHash); } /* -** The fts3 built-in tokenizers - "simple", "porter" and "icu"- are -** implemented in files fts3_tokenizer1.c, fts3_porter.c and fts3_icu.c 
-** respectively. The following three forward declarations are for functions -** declared in these files used to retrieve the respective implementations. +** The fts3 built-in tokenizers - "simple" and "porter" - are implemented +** in files fts3_tokenizer1.c and fts3_porter.c respectively. The following +** two forward declarations are for functions declared in these files +** used to retrieve the respective implementations. ** ** Calling sqlite3Fts3SimpleTokenizerModule() sets the value pointed ** to by the argument to point to the "simple" tokenizer implementation. -** And so on. +** Function ...PorterTokenizerModule() sets *pModule to point to the +** porter tokenizer/stemmer implementation. */ SQLITE_PRIVATE void sqlite3Fts3SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule); SQLITE_PRIVATE void sqlite3Fts3PorterTokenizerModule(sqlite3_tokenizer_module const**ppModule); -#ifdef SQLITE_ENABLE_ICU SQLITE_PRIVATE void sqlite3Fts3IcuTokenizerModule(sqlite3_tokenizer_module const**ppModule); -#endif /* ** Initialise the fts3 extension. If this extension is built as part ** of the sqlite library, then this function is called directly by ** SQLite. If fts3 is built as a dynamically loadable extension, this @@ -111578,11 +110155,11 @@ */ if( SQLITE_OK==rc && SQLITE_OK==(rc = sqlite3Fts3InitHashTable(db, pHash, "fts3_tokenizer")) && SQLITE_OK==(rc = sqlite3_overload_function(db, "snippet", -1)) && SQLITE_OK==(rc = sqlite3_overload_function(db, "offsets", 1)) - && SQLITE_OK==(rc = sqlite3_overload_function(db, "matchinfo", 1)) + && SQLITE_OK==(rc = sqlite3_overload_function(db, "matchinfo", -1)) && SQLITE_OK==(rc = sqlite3_overload_function(db, "optimize", 1)) ){ rc = sqlite3_create_module_v2( db, "fts3", &fts3Module, (void *)pHash, hashDestroy ); @@ -111720,22 +110297,10 @@ ** negative values). */ static int fts3isspace(char c){ return c==' ' || c=='\t' || c=='\n' || c=='\r' || c=='\v' || c=='\f'; } - -/* -** Allocate nByte bytes of memory using sqlite3_malloc(). If successful, -** zero the memory before returning a pointer to it. If unsuccessful, -** return NULL. -*/ -static void *fts3MallocZero(int nByte){ - void *pRet = sqlite3_malloc(nByte); - if( pRet ) memset(pRet, 0, nByte); - return pRet; -} - /* ** Extract the next token from buffer z (length n) using the tokenizer ** and other information (column names etc.) in pParse. 
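/* [Editorial sketch -- not part of this diff or of the SQLite sources.
** The "simple" tokenizer that the query parser drives through the
** xOpen()/xNext() interface reports, roughly, each maximal run of
** alphanumeric characters as a token, folded to lower case, together with
** its byte offsets. The standalone function below (hypothetical name, no
** sqlite3_tokenizer plumbing) extracts one such token starting at or after
** *piOff and advances *piOff past it. zOut is assumed to hold at least n
** bytes. */
#include <ctype.h>

/* Returns the token length, or 0 if no further token exists. On success,
** *piStart/*piEnd are the byte offsets of the token within z. */
static int sketchNextToken(const char *z, int n, int *piOff,
                           int *piStart, int *piEnd, char *zOut){
  int i = *piOff;
  int nTok = 0;
  while( i<n && !isalnum((unsigned char)z[i]) ) i++;   /* skip separators */
  *piStart = i;
  while( i<n && isalnum((unsigned char)z[i]) ){
    zOut[nTok++] = (char)tolower((unsigned char)z[i]);
    i++;
  }
  *piEnd = i;
  *piOff = i;
  return nTok;
}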
Create an Fts3Expr ** structure of type FTSQUERY_PHRASE containing a phrase consisting of this @@ -111770,14 +110335,15 @@ pCursor->pTokenizer = pTokenizer; rc = pModule->xNext(pCursor, &zToken, &nToken, &iStart, &iEnd, &iPosition); if( rc==SQLITE_OK ){ nByte = sizeof(Fts3Expr) + sizeof(Fts3Phrase) + nToken; - pRet = (Fts3Expr *)fts3MallocZero(nByte); + pRet = (Fts3Expr *)sqlite3_malloc(nByte); if( !pRet ){ rc = SQLITE_NOMEM; }else{ + memset(pRet, 0, nByte); pRet->eType = FTSQUERY_PHRASE; pRet->pPhrase = (Fts3Phrase *)&pRet[1]; pRet->pPhrase->nToken = 1; pRet->pPhrase->iColumn = iCol; pRet->pPhrase->aToken[0].n = nToken; @@ -111849,21 +110415,20 @@ const char *zToken; int nToken, iBegin, iEnd, iPos; rc = pModule->xNext(pCursor, &zToken, &nToken, &iBegin, &iEnd, &iPos); if( rc==SQLITE_OK ){ int nByte = sizeof(Fts3Expr) + sizeof(Fts3Phrase); - p = fts3ReallocOrFree(p, nByte+ii*sizeof(Fts3PhraseToken)); + p = fts3ReallocOrFree(p, nByte+ii*sizeof(struct PhraseToken)); zTemp = fts3ReallocOrFree(zTemp, nTemp + nToken); if( !p || !zTemp ){ goto no_mem; } if( ii==0 ){ memset(p, 0, nByte); p->pPhrase = (Fts3Phrase *)&p[1]; } p->pPhrase = (Fts3Phrase *)&p[1]; - memset(&p->pPhrase->aToken[ii], 0, sizeof(Fts3PhraseToken)); p->pPhrase->nToken = ii+1; p->pPhrase->aToken[ii].n = nToken; memcpy(&zTemp[nTemp], zToken, nToken); nTemp += nToken; if( iEndpPhrase->nToken-1):0) * sizeof(Fts3PhraseToken); + nByte += (p?(p->pPhrase->nToken-1):0) * sizeof(struct PhraseToken); p = fts3ReallocOrFree(p, nByte + nTemp); if( !p ){ goto no_mem; } if( zTemp ){ @@ -111999,14 +110564,15 @@ */ cNext = zInput[nKey]; if( fts3isspace(cNext) || cNext=='"' || cNext=='(' || cNext==')' || cNext==0 ){ - pRet = (Fts3Expr *)fts3MallocZero(sizeof(Fts3Expr)); + pRet = (Fts3Expr *)sqlite3_malloc(sizeof(Fts3Expr)); if( !pRet ){ return SQLITE_NOMEM; } + memset(pRet, 0, sizeof(Fts3Expr)); pRet->eType = pKey->eType; pRet->nNear = nNear; *ppExpr = pRet; *pnConsumed = (int)((zInput - z) + nKey); return SQLITE_OK; @@ -112178,16 +110744,17 @@ if( !sqlite3_fts3_enable_parentheses && p->eType==FTSQUERY_PHRASE && p->pPhrase->isNot ){ /* Create an implicit NOT operator. */ - Fts3Expr *pNot = fts3MallocZero(sizeof(Fts3Expr)); + Fts3Expr *pNot = sqlite3_malloc(sizeof(Fts3Expr)); if( !pNot ){ sqlite3Fts3ExprFree(p); rc = SQLITE_NOMEM; goto exprparse_out; } + memset(pNot, 0, sizeof(Fts3Expr)); pNot->eType = FTSQUERY_NOT; pNot->pRight = p; if( pNotBranch ){ pNot->pLeft = pNotBranch; } @@ -112211,16 +110778,17 @@ if( isPhrase && !isRequirePhrase ){ /* Insert an implicit AND operator. 
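/* [Editorial sketch -- not part of this diff or of the SQLite sources.
** Two phrases written next to each other ("sqlite fossil") carry no explicit
** operator, so the parser manufactures an AND node and splices it in between
** the expression built so far and the phrase that follows; the real parser
** then lets insertBinaryOperator() handle precedence. The hypothetical types
** below show that splice in isolation. */
#include <stdlib.h>

#define SKETCH_AND     1
#define SKETCH_PHRASE  2

struct SketchExpr {
  int eType;                    /* SKETCH_AND or SKETCH_PHRASE */
  struct SketchExpr *pLeft;     /* Left operand (NULL for a phrase) */
  struct SketchExpr *pRight;    /* Right operand (NULL for a phrase) */
};

/* Join pSoFar and pNew under a new AND node. Returns NULL if the node
** cannot be allocated. */
static struct SketchExpr *sketchImplicitAnd(struct SketchExpr *pSoFar,
                                            struct SketchExpr *pNew){
  struct SketchExpr *pAnd = (struct SketchExpr *)calloc(1, sizeof(*pAnd));
  if( pAnd==0 ) return 0;
  pAnd->eType = SKETCH_AND;
  pAnd->pLeft = pSoFar;
  pAnd->pRight = pNew;
  return pAnd;
}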
*/ Fts3Expr *pAnd; assert( pRet && pPrev ); - pAnd = fts3MallocZero(sizeof(Fts3Expr)); + pAnd = sqlite3_malloc(sizeof(Fts3Expr)); if( !pAnd ){ sqlite3Fts3ExprFree(p); rc = SQLITE_NOMEM; goto exprparse_out; } + memset(pAnd, 0, sizeof(Fts3Expr)); pAnd->eType = FTSQUERY_AND; insertBinaryOperator(&pRet, pPrev, pAnd); pPrev = pAnd; } @@ -113666,11 +112234,11 @@ } sqlite3_result_blob(context, (void *)&pPtr, sizeof(pPtr), SQLITE_TRANSIENT); } -SQLITE_PRIVATE int sqlite3Fts3IsIdChar(char c){ +static int fts3IsIdChar(char c){ static const char isFtsIdChar[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1x */ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ @@ -113704,13 +112272,13 @@ while( *z2 && z2[0]!=']' ) z2++; if( *z2 ) z2++; break; default: - if( sqlite3Fts3IsIdChar(*z1) ){ + if( fts3IsIdChar(*z1) ){ z2 = &z1[1]; - while( sqlite3Fts3IsIdChar(*z2) ) z2++; + while( fts3IsIdChar(*z2) ) z2++; }else{ z1++; } } } @@ -113719,30 +112287,42 @@ return z1; } SQLITE_PRIVATE int sqlite3Fts3InitTokenizer( Fts3Hash *pHash, /* Tokenizer hash table */ - const char *zArg, /* Tokenizer name */ + const char *zArg, /* Possible tokenizer specification */ sqlite3_tokenizer **ppTok, /* OUT: Tokenizer (if applicable) */ + const char **pzTokenizer, /* OUT: Set to zArg if is tokenizer */ char **pzErr /* OUT: Set to malloced error message */ ){ int rc; char *z = (char *)zArg; int n; char *zCopy; char *zEnd; /* Pointer to nul-term of zCopy */ sqlite3_tokenizer_module *m; - zCopy = sqlite3_mprintf("%s", zArg); - if( !zCopy ) return SQLITE_NOMEM; + if( !z ){ + zCopy = sqlite3_mprintf("simple"); + }else{ + if( sqlite3_strnicmp(z, "tokenize", 8) || fts3IsIdChar(z[8])){ + return SQLITE_OK; + } + zCopy = sqlite3_mprintf("%s", &z[8]); + *pzTokenizer = zArg; + } + if( !zCopy ){ + return SQLITE_NOMEM; + } + zEnd = &zCopy[strlen(zCopy)]; z = (char *)sqlite3Fts3NextToken(zCopy, &n); z[n] = '\0'; sqlite3Fts3Dequote(z); - m = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash,z,(int)strlen(z)+1); + m = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash, z, (int)strlen(z)+1); if( !m ){ *pzErr = sqlite3_mprintf("unknown tokenizer: %s", z); rc = SQLITE_ERROR; }else{ char const **aArg = 0; @@ -114307,22 +112887,10 @@ */ #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) -/* -** When full-text index nodes are loaded from disk, the buffer that they -** are loaded into has the following number of bytes of padding at the end -** of it. i.e. if a full-text index node is 900 bytes in size, then a buffer -** of 920 bytes is allocated for it. -** -** This means that if we have a pointer into a buffer containing node data, -** it is always safe to read up to two varints from it without risking an -** overread, even if the node data is corrupted. -*/ -#define FTS3_NODE_PADDING (FTS3_VARINT_MAX*2) - typedef struct PendingList PendingList; typedef struct SegmentNode SegmentNode; typedef struct SegmentWriter SegmentWriter; /* @@ -114337,31 +112905,19 @@ sqlite3_int64 iLastDocid; sqlite3_int64 iLastCol; sqlite3_int64 iLastPos; }; - -/* -** Each cursor has a (possibly empty) linked list of the following objects. 
-*/ -struct Fts3DeferredToken { - Fts3PhraseToken *pToken; /* Pointer to corresponding expr token */ - int iCol; /* Column token must occur in */ - Fts3DeferredToken *pNext; /* Next in list of deferred tokens */ - PendingList *pList; /* Doclist is assembled here */ -}; - /* ** An instance of this structure is used to iterate through the terms on ** a contiguous set of segment b-tree leaf nodes. Although the details of ** this structure are only manipulated by code in this file, opaque handles ** of type Fts3SegReader* are also used by code in fts3.c to iterate through ** terms when querying the full-text index. See functions: ** ** sqlite3Fts3SegReaderNew() ** sqlite3Fts3SegReaderFree() -** sqlite3Fts3SegReaderCost() ** sqlite3Fts3SegReaderIterate() ** ** Methods used to manipulate Fts3SegReader structures: ** ** fts3SegReaderNext() @@ -114368,38 +112924,34 @@ ** fts3SegReaderFirstDocid() ** fts3SegReaderNextDocid() */ struct Fts3SegReader { int iIdx; /* Index within level, or 0x7FFFFFFF for PT */ - - sqlite3_int64 iStartBlock; /* Rowid of first leaf block to traverse */ - sqlite3_int64 iLeafEndBlock; /* Rowid of final leaf block to traverse */ - sqlite3_int64 iEndBlock; /* Rowid of final block in segment (or 0) */ - sqlite3_int64 iCurrentBlock; /* Current leaf block (or 0) */ - + sqlite3_int64 iStartBlock; + sqlite3_int64 iEndBlock; + sqlite3_stmt *pStmt; /* SQL Statement to access leaf nodes */ char *aNode; /* Pointer to node data (or NULL) */ int nNode; /* Size of buffer at aNode (or 0) */ + int nTermAlloc; /* Allocated size of zTerm buffer */ Fts3HashElem **ppNextElem; /* Variables set by fts3SegReaderNext(). These may be read directly ** by the caller. They are valid from the time SegmentReaderNew() returns ** until SegmentReaderNext() returns something other than SQLITE_OK ** (i.e. SQLITE_DONE). */ int nTerm; /* Number of bytes in current term */ char *zTerm; /* Pointer to current term */ - int nTermAlloc; /* Allocated size of zTerm buffer */ char *aDoclist; /* Pointer to doclist of current entry */ int nDoclist; /* Size of doclist in current entry */ /* The following variables are used to iterate through the current doclist */ char *pOffsetList; sqlite3_int64 iDocid; }; #define fts3SegReaderIsPending(p) ((p)->ppNextElem!=0) -#define fts3SegReaderIsRootOnly(p) ((p)->aNode==(char *)&(p)[1]) /* ** An instance of this structure is used to create a segment b-tree in the ** database. The internal details of this type are only accessed by the ** following functions: @@ -114464,15 +113016,16 @@ #define SQL_SELECT_LEVEL_COUNT 14 #define SQL_SELECT_SEGDIR_COUNT_MAX 15 #define SQL_DELETE_SEGDIR_BY_LEVEL 16 #define SQL_DELETE_SEGMENTS_RANGE 17 #define SQL_CONTENT_INSERT 18 -#define SQL_DELETE_DOCSIZE 19 -#define SQL_REPLACE_DOCSIZE 20 -#define SQL_SELECT_DOCSIZE 21 -#define SQL_SELECT_DOCTOTAL 22 -#define SQL_REPLACE_DOCTOTAL 23 +#define SQL_GET_BLOCK 19 +#define SQL_DELETE_DOCSIZE 20 +#define SQL_REPLACE_DOCSIZE 21 +#define SQL_SELECT_DOCSIZE 22 +#define SQL_SELECT_DOCTOTAL 23 +#define SQL_REPLACE_DOCTOTAL 24 /* ** This function is used to obtain an SQLite prepared statement handle ** for the statement identified by the second argument. If successful, ** *pp is set to the requested statement handle and SQLITE_OK returned. @@ -114513,15 +113066,16 @@ /* 15 */ "SELECT count(*), max(level) FROM %Q.'%q_segdir'", /* 16 */ "DELETE FROM %Q.'%q_segdir' WHERE level = ?", /* 17 */ "DELETE FROM %Q.'%q_segments' WHERE blockid BETWEEN ? 
AND ?", /* 18 */ "INSERT INTO %Q.'%q_content' VALUES(%z)", -/* 19 */ "DELETE FROM %Q.'%q_docsize' WHERE docid = ?", -/* 20 */ "REPLACE INTO %Q.'%q_docsize' VALUES(?,?)", -/* 21 */ "SELECT size FROM %Q.'%q_docsize' WHERE docid=?", -/* 22 */ "SELECT value FROM %Q.'%q_stat' WHERE id=0", -/* 23 */ "REPLACE INTO %Q.'%q_stat' VALUES(0,?)", +/* 19 */ "SELECT block FROM %Q.'%q_segments' WHERE blockid = ?", +/* 20 */ "DELETE FROM %Q.'%q_docsize' WHERE docid = ?", +/* 21 */ "REPLACE INTO %Q.'%q_docsize' VALUES(?,?)", +/* 22 */ "SELECT size FROM %Q.'%q_docsize' WHERE docid=?", +/* 23 */ "SELECT value FROM %Q.'%q_stat' WHERE id=0", +/* 24 */ "REPLACE INTO %Q.'%q_stat' VALUES(0,?)", }; int rc = SQLITE_OK; sqlite3_stmt *pStmt; assert( SizeofArray(azSql)==SizeofArray(p->aStmt) ); @@ -114591,10 +113145,49 @@ rc = sqlite3_reset(pStmt); } *pRC = rc; } + +/* +** Read a single block from the %_segments table. If the specified block +** does not exist, return SQLITE_CORRUPT. If some other error (malloc, IO +** etc.) occurs, return the appropriate SQLite error code. +** +** Otherwise, if successful, set *pzBlock to point to a buffer containing +** the block read from the database, and *pnBlock to the size of the read +** block in bytes. +** +** WARNING: The returned buffer is only valid until the next call to +** sqlite3Fts3ReadBlock(). +*/ +SQLITE_PRIVATE int sqlite3Fts3ReadBlock( + Fts3Table *p, + sqlite3_int64 iBlock, + char const **pzBlock, + int *pnBlock +){ + sqlite3_stmt *pStmt; + int rc = fts3SqlStmt(p, SQL_GET_BLOCK, &pStmt, 0); + if( rc!=SQLITE_OK ) return rc; + sqlite3_reset(pStmt); + + if( pzBlock ){ + sqlite3_bind_int64(pStmt, 1, iBlock); + rc = sqlite3_step(pStmt); + if( rc!=SQLITE_ROW ){ + return (rc==SQLITE_DONE ? SQLITE_CORRUPT : rc); + } + + *pnBlock = sqlite3_column_bytes(pStmt, 0); + *pzBlock = (char *)sqlite3_column_blob(pStmt, 0); + if( sqlite3_column_type(pStmt, 0)!=SQLITE_BLOB ){ + return SQLITE_CORRUPT; + } + } + return SQLITE_OK; +} /* ** This function ensures that the caller has obtained a shared-cache ** table-lock on the %_content table. This is required before reading ** data from the fts3 table. If this lock is not acquired first, then @@ -114760,14 +113353,14 @@ ** p->iPrevDocid, and the column is specified by argument iCol. ** ** If successful, SQLITE_OK is returned. Otherwise, an SQLite error code. */ static int fts3PendingTermsAdd( - Fts3Table *p, /* Table into which text will be inserted */ - const char *zText, /* Text of document to be inserted */ - int iCol, /* Column into which text is being inserted */ - u32 *pnWord /* OUT: Number of tokens inserted */ + Fts3Table *p, /* FTS table into which text will be inserted */ + const char *zText, /* Text of document to be inseted */ + int iCol, /* Column number into which text is inserted */ + u32 *pnWord /* OUT: Number of tokens inserted */ ){ int rc; int iStart; int iEnd; int iPos; @@ -114848,13 +113441,10 @@ } p->iPrevDocid = iDocid; return SQLITE_OK; } -/* -** Discard the contents of the pending-terms hash table. 
-*/ SQLITE_PRIVATE void sqlite3Fts3PendingTermsClear(Fts3Table *p){ Fts3HashElem *pElem; for(pElem=fts3HashFirst(&p->pendingTerms); pElem; pElem=fts3HashNext(pElem)){ sqlite3_free(fts3HashData(pElem)); } @@ -114878,11 +113468,10 @@ int rc = fts3PendingTermsAdd(p, zText, i-2, &aSz[i-2]); if( rc!=SQLITE_OK ){ return rc; } } - aSz[p->nColumn] += sqlite3_value_bytes(apVal[i]); } return SQLITE_OK; } /* @@ -114966,12 +113555,10 @@ fts3SqlExec(&rc, p, SQL_DELETE_ALL_CONTENT, 0); fts3SqlExec(&rc, p, SQL_DELETE_ALL_SEGMENTS, 0); fts3SqlExec(&rc, p, SQL_DELETE_ALL_SEGDIR, 0); if( p->bHasDocsize ){ fts3SqlExec(&rc, p, SQL_DELETE_ALL_DOCSIZE, 0); - } - if( p->bHasStat ){ fts3SqlExec(&rc, p, SQL_DELETE_ALL_STAT, 0); } return rc; } @@ -114978,11 +113565,11 @@ /* ** The first element in the apVal[] array is assumed to contain the docid ** (an integer) of a row about to be deleted. Remove all terms from the ** full-text index. */ -static void fts3DeleteTerms( +static void fts3DeleteTerms( int *pRC, /* Result code */ Fts3Table *p, /* The FTS table to delete from */ sqlite3_value **apVal, /* apVal[] contains the docid to be deleted */ u32 *aSz /* Sizes of deleted document written here */ ){ @@ -115000,11 +113587,10 @@ if( rc!=SQLITE_OK ){ sqlite3_reset(pSelect); *pRC = rc; return; } - aSz[p->nColumn] += sqlite3_column_bytes(pSelect, i); } } rc = sqlite3_reset(pSelect); }else{ sqlite3_reset(pSelect); @@ -115063,97 +113649,16 @@ } return rc; } -/* -** The %_segments table is declared as follows: -** -** CREATE TABLE %_segments(blockid INTEGER PRIMARY KEY, block BLOB) -** -** This function reads data from a single row of the %_segments table. The -** specific row is identified by the iBlockid parameter. If paBlob is not -** NULL, then a buffer is allocated using sqlite3_malloc() and populated -** with the contents of the blob stored in the "block" column of the -** identified table row is. Whether or not paBlob is NULL, *pnBlob is set -** to the size of the blob in bytes before returning. -** -** If an error occurs, or the table does not contain the specified row, -** an SQLite error code is returned. Otherwise, SQLITE_OK is returned. If -** paBlob is non-NULL, then it is the responsibility of the caller to -** eventually free the returned buffer. -** -** This function may leave an open sqlite3_blob* handle in the -** Fts3Table.pSegments variable. This handle is reused by subsequent calls -** to this function. The handle may be closed by calling the -** sqlite3Fts3SegmentsClose() function. Reusing a blob handle is a handy -** performance improvement, but the blob handle should always be closed -** before control is returned to the user (to prevent a lock being held -** on the database file for longer than necessary). Thus, any virtual table -** method (xFilter etc.) that may directly or indirectly call this function -** must call sqlite3Fts3SegmentsClose() before returning. -*/ -SQLITE_PRIVATE int sqlite3Fts3ReadBlock( - Fts3Table *p, /* FTS3 table handle */ - sqlite3_int64 iBlockid, /* Access the row with blockid=$iBlockid */ - char **paBlob, /* OUT: Blob data in malloc'd buffer */ - int *pnBlob /* OUT: Size of blob data */ -){ - int rc; /* Return code */ - - /* pnBlob must be non-NULL. paBlob may be NULL or non-NULL. 
*/ - assert( pnBlob); - - if( p->pSegments ){ - rc = sqlite3_blob_reopen(p->pSegments, iBlockid); - }else{ - if( 0==p->zSegmentsTbl ){ - p->zSegmentsTbl = sqlite3_mprintf("%s_segments", p->zName); - if( 0==p->zSegmentsTbl ) return SQLITE_NOMEM; - } - rc = sqlite3_blob_open( - p->db, p->zDb, p->zSegmentsTbl, "block", iBlockid, 0, &p->pSegments - ); - } - - if( rc==SQLITE_OK ){ - int nByte = sqlite3_blob_bytes(p->pSegments); - if( paBlob ){ - char *aByte = sqlite3_malloc(nByte + FTS3_NODE_PADDING); - if( !aByte ){ - rc = SQLITE_NOMEM; - }else{ - rc = sqlite3_blob_read(p->pSegments, aByte, nByte, 0); - memset(&aByte[nByte], 0, FTS3_NODE_PADDING); - if( rc!=SQLITE_OK ){ - sqlite3_free(aByte); - aByte = 0; - } - } - *paBlob = aByte; - } - *pnBlob = nByte; - } - - return rc; -} - -/* -** Close the blob handle at p->pSegments, if it is open. See comments above -** the sqlite3Fts3ReadBlock() function for details. -*/ -SQLITE_PRIVATE void sqlite3Fts3SegmentsClose(Fts3Table *p){ - sqlite3_blob_close(p->pSegments); - p->pSegments = 0; -} - /* ** Move the iterator passed as the first argument to the next term in the ** segment. If successful, SQLITE_OK is returned. If there is no next term, ** SQLITE_DONE. Otherwise, an SQLite error code. */ -static int fts3SegReaderNext(Fts3Table *p, Fts3SegReader *pReader){ +static int fts3SegReaderNext(Fts3SegReader *pReader){ char *pNext; /* Cursor variable */ int nPrefix; /* Number of bytes in term prefix */ int nSuffix; /* Number of bytes in term suffix */ if( !pReader->aDoclist ){ @@ -115161,12 +113666,11 @@ }else{ pNext = &pReader->aDoclist[pReader->nDoclist]; } if( !pNext || pNext>=&pReader->aNode[pReader->nNode] ){ - int rc; /* Return code from Fts3ReadBlock() */ - + int rc; if( fts3SegReaderIsPending(pReader) ){ Fts3HashElem *pElem = *(pReader->ppNextElem); if( pElem==0 ){ pReader->aNode = 0; }else{ @@ -115178,40 +113682,26 @@ pReader->ppNextElem++; assert( pReader->aNode ); } return SQLITE_OK; } - - if( !fts3SegReaderIsRootOnly(pReader) ){ - sqlite3_free(pReader->aNode); - } - pReader->aNode = 0; - - /* If iCurrentBlock>=iLeafEndBlock, this is an EOF condition. All leaf - ** blocks have already been traversed. */ - assert( pReader->iCurrentBlock<=pReader->iLeafEndBlock ); - if( pReader->iCurrentBlock>=pReader->iLeafEndBlock ){ + if( !pReader->pStmt ){ + pReader->aNode = 0; return SQLITE_OK; } - - rc = sqlite3Fts3ReadBlock( - p, ++pReader->iCurrentBlock, &pReader->aNode, &pReader->nNode - ); - if( rc!=SQLITE_OK ) return rc; + rc = sqlite3_step(pReader->pStmt); + if( rc!=SQLITE_ROW ){ + pReader->aNode = 0; + return (rc==SQLITE_DONE ? SQLITE_OK : rc); + } + pReader->nNode = sqlite3_column_bytes(pReader->pStmt, 0); + pReader->aNode = (char *)sqlite3_column_blob(pReader->pStmt, 0); pNext = pReader->aNode; } - /* Because of the FTS3_NODE_PADDING bytes of padding, the following is - ** safe (no risk of overread) even if the node data is corrupted. 
- */ pNext += sqlite3Fts3GetVarint32(pNext, &nPrefix); pNext += sqlite3Fts3GetVarint32(pNext, &nSuffix); - if( nPrefix<0 || nSuffix<=0 - || &pNext[nSuffix]>&pReader->aNode[pReader->nNode] - ){ - return SQLITE_CORRUPT; - } if( nPrefix+nSuffix>pReader->nTermAlloc ){ int nNew = (nPrefix+nSuffix)*2; char *zNew = sqlite3_realloc(pReader->zTerm, nNew); if( !zNew ){ @@ -115222,22 +113712,13 @@ } memcpy(&pReader->zTerm[nPrefix], pNext, nSuffix); pReader->nTerm = nPrefix+nSuffix; pNext += nSuffix; pNext += sqlite3Fts3GetVarint32(pNext, &pReader->nDoclist); + assert( pNext<&pReader->aNode[pReader->nNode] ); pReader->aDoclist = pNext; pReader->pOffsetList = 0; - - /* Check that the doclist does not appear to extend past the end of the - ** b-tree node. And that the final byte of the doclist is 0x00. If either - ** of these statements is untrue, then the data structure is corrupt. - */ - if( &pReader->aDoclist[pReader->nDoclist]>&pReader->aNode[pReader->nNode] - || pReader->aDoclist[pReader->nDoclist-1] - ){ - return SQLITE_CORRUPT; - } return SQLITE_OK; } /* ** Set the SegReader to point to the first docid in the doclist associated @@ -115296,104 +113777,29 @@ pReader->pOffsetList = p + sqlite3Fts3GetVarint(p, &iDelta); pReader->iDocid += iDelta; } } -/* -** This function is called to estimate the amount of data that will be -** loaded from the disk If SegReaderIterate() is called on this seg-reader, -** in units of average document size. -** -** This can be used as follows: If the caller has a small doclist that -** contains references to N documents, and is considering merging it with -** a large doclist (size X "average documents"), it may opt not to load -** the large doclist if X>N. -*/ -SQLITE_PRIVATE int sqlite3Fts3SegReaderCost( - Fts3Cursor *pCsr, /* FTS3 cursor handle */ - Fts3SegReader *pReader, /* Segment-reader handle */ - int *pnCost /* IN/OUT: Number of bytes read */ -){ - Fts3Table *p = (Fts3Table*)pCsr->base.pVtab; - int rc = SQLITE_OK; /* Return code */ - int nCost = 0; /* Cost in bytes to return */ - int pgsz = p->nPgsz; /* Database page size */ - - /* If this seg-reader is reading the pending-terms table, or if all data - ** for the segment is stored on the root page of the b-tree, then the cost - ** is zero. In this case all required data is already in main memory. - */ - if( p->bHasStat - && !fts3SegReaderIsPending(pReader) - && !fts3SegReaderIsRootOnly(pReader) - ){ - int nBlob = 0; - sqlite3_int64 iBlock; - - if( pCsr->nRowAvg==0 ){ - /* The average document size, which is required to calculate the cost - ** of each doclist, has not yet been determined. Read the required - ** data from the %_stat table to calculate it. - ** - ** Entry 0 of the %_stat table is a blob containing (nCol+1) FTS3 - ** varints, where nCol is the number of columns in the FTS3 table. - ** The first varint is the number of documents currently stored in - ** the table. The following nCol varints contain the total amount of - ** data stored in all rows of each column of the table, from left - ** to right. 
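As an illustration of the %_stat layout described above, the blob can be decoded with a couple of standalone helpers. This is only a sketch: it assumes the FTS3 varint convention of seven low-order bits per byte with the high bit marking a continuation byte, and the helper names below are invented for the example.

  #include <sqlite3.h>

  /* Decode one varint: low-order 7 bits first, the 0x80 bit means "more bytes follow". */
  static int getVarint(const unsigned char *p, sqlite3_int64 *pVal){
    sqlite3_uint64 v = 0;
    int shift = 0, n = 0;
    unsigned char c;
    do{
      c = p[n++];
      v |= (sqlite3_uint64)(c & 0x7f) << shift;
      shift += 7;
    }while( c & 0x80 );
    *pVal = (sqlite3_int64)v;
    return n;                          /* bytes consumed */
  }

  /* Split a %_stat blob into the document count and per-column byte totals. */
  static void decodeStatBlob(const unsigned char *a, int nCol,
                             sqlite3_int64 *pnDoc, sqlite3_int64 *aNbyte){
    int i;
    a += getVarint(a, pnDoc);          /* first varint: number of documents      */
    for(i=0; i<nCol; i++){
      a += getVarint(a, &aNbyte[i]);   /* then one total per column, left to right */
    }
  }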
- */ - sqlite3_stmt *pStmt; - rc = fts3SqlStmt(p, SQL_SELECT_DOCTOTAL, &pStmt, 0); - if( rc ) return rc; - if( sqlite3_step(pStmt)==SQLITE_ROW ){ - sqlite3_int64 nDoc = 0; - sqlite3_int64 nByte = 0; - const char *a = sqlite3_column_blob(pStmt, 0); - if( a ){ - const char *pEnd = &a[sqlite3_column_bytes(pStmt, 0)]; - a += sqlite3Fts3GetVarint(a, &nDoc); - while( anRowAvg = (((nByte / nDoc) + pgsz - 1) / pgsz); - } - rc = sqlite3_reset(pStmt); - if( rc!=SQLITE_OK || pCsr->nRowAvg==0 ) return rc; - } - - /* Assume that a blob flows over onto overflow pages if it is larger - ** than (pgsz-35) bytes in size (the file-format documentation - ** confirms this). - */ - for(iBlock=pReader->iStartBlock; iBlock<=pReader->iLeafEndBlock; iBlock++){ - rc = sqlite3Fts3ReadBlock(p, iBlock, 0, &nBlob); - if( rc!=SQLITE_OK ) break; - if( (nBlob+35)>pgsz ){ - int nOvfl = (nBlob + 34)/pgsz; - nCost += ((nOvfl + pCsr->nRowAvg - 1)/pCsr->nRowAvg); - } - } - } - - *pnCost += nCost; - return rc; -} - /* ** Free all allocations associated with the iterator passed as the ** second argument. */ SQLITE_PRIVATE void sqlite3Fts3SegReaderFree(Fts3Table *p, Fts3SegReader *pReader){ - if( pReader && !fts3SegReaderIsPending(pReader) ){ - sqlite3_free(pReader->zTerm); - if( !fts3SegReaderIsRootOnly(pReader) ){ - sqlite3_free(pReader->aNode); + if( pReader ){ + if( pReader->pStmt ){ + /* Move the leaf-range SELECT statement to the aLeavesStmt[] array, + ** so that it can be reused when required by another query. + */ + assert( p->nLeavesStmtnLeavesTotal ); + sqlite3_reset(pReader->pStmt); + p->aLeavesStmt[p->nLeavesStmt++] = pReader->pStmt; } + if( !fts3SegReaderIsPending(pReader) ){ + sqlite3_free(pReader->zTerm); + } + sqlite3_free(pReader); } - sqlite3_free(pReader); } /* ** Allocate a new SegReader object. */ @@ -115409,35 +113815,77 @@ ){ int rc = SQLITE_OK; /* Return code */ Fts3SegReader *pReader; /* Newly allocated SegReader object */ int nExtra = 0; /* Bytes to allocate segment root node */ - assert( iStartLeaf<=iEndLeaf ); if( iStartLeaf==0 ){ - nExtra = nRoot + FTS3_NODE_PADDING; + nExtra = nRoot; } pReader = (Fts3SegReader *)sqlite3_malloc(sizeof(Fts3SegReader) + nExtra); if( !pReader ){ return SQLITE_NOMEM; } memset(pReader, 0, sizeof(Fts3SegReader)); - pReader->iIdx = iAge; pReader->iStartBlock = iStartLeaf; - pReader->iLeafEndBlock = iEndLeaf; + pReader->iIdx = iAge; pReader->iEndBlock = iEndBlock; if( nExtra ){ /* The entire segment is stored in the root node. */ pReader->aNode = (char *)&pReader[1]; pReader->nNode = nRoot; memcpy(pReader->aNode, zRoot, nRoot); - memset(&pReader->aNode[nRoot], 0, FTS3_NODE_PADDING); }else{ - pReader->iCurrentBlock = iStartLeaf-1; + /* If the text of the SQL statement to iterate through a contiguous + ** set of entries in the %_segments table has not yet been composed, + ** compose it now. + */ + if( !p->zSelectLeaves ){ + p->zSelectLeaves = sqlite3_mprintf( + "SELECT block FROM %Q.'%q_segments' WHERE blockid BETWEEN ? AND ? " + "ORDER BY blockid", p->zDb, p->zName + ); + if( !p->zSelectLeaves ){ + rc = SQLITE_NOMEM; + goto finished; + } + } + + /* If there are no free statements in the aLeavesStmt[] array, prepare + ** a new statement now. Otherwise, reuse a prepared statement from + ** aLeavesStmt[]. 
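The reuse pattern referred to here relies only on the public API: once a statement has been stepped to completion, sqlite3_reset() returns it to its initial state so new parameters can be bound without re-preparing. A minimal sketch, with an invented table name and a one-slot cache standing in for the aLeavesStmt[] array:

  #include <sqlite3.h>

  static sqlite3_stmt *pCached = 0;    /* one-slot statement cache (illustrative) */

  static int queryBlockRange(sqlite3 *db, sqlite3_int64 iStart, sqlite3_int64 iEnd){
    int rc = SQLITE_OK;
    if( pCached==0 ){
      rc = sqlite3_prepare_v2(db,
          "SELECT block FROM segments WHERE blockid BETWEEN ?1 AND ?2"
          " ORDER BY blockid", -1, &pCached, 0);
      if( rc!=SQLITE_OK ) return rc;
    }
    sqlite3_bind_int64(pCached, 1, iStart);
    sqlite3_bind_int64(pCached, 2, iEnd);
    while( sqlite3_step(pCached)==SQLITE_ROW ){
      /* consume sqlite3_column_blob(pCached, 0) here */
    }
    return sqlite3_reset(pCached);     /* ready to be reused by the next call */
  }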
+ */ + if( p->nLeavesStmt==0 ){ + if( p->nLeavesTotal==p->nLeavesAlloc ){ + int nNew = p->nLeavesAlloc + 16; + sqlite3_stmt **aNew = (sqlite3_stmt **)sqlite3_realloc( + p->aLeavesStmt, nNew*sizeof(sqlite3_stmt *) + ); + if( !aNew ){ + rc = SQLITE_NOMEM; + goto finished; + } + p->nLeavesAlloc = nNew; + p->aLeavesStmt = aNew; + } + rc = sqlite3_prepare_v2(p->db, p->zSelectLeaves, -1, &pReader->pStmt, 0); + if( rc!=SQLITE_OK ){ + goto finished; + } + p->nLeavesTotal++; + }else{ + pReader->pStmt = p->aLeavesStmt[--p->nLeavesStmt]; + } + + /* Bind the start and end leaf blockids to the prepared SQL statement. */ + sqlite3_bind_int64(pReader->pStmt, 1, iStartLeaf); + sqlite3_bind_int64(pReader->pStmt, 2, iEndLeaf); } + rc = fts3SegReaderNext(pReader); + finished: if( rc==SQLITE_OK ){ *ppReader = pReader; }else{ sqlite3Fts3SegReaderFree(p, pReader); } @@ -115528,10 +113976,11 @@ }else{ memset(pReader, 0, nByte); pReader->iIdx = 0x7FFFFFFF; pReader->ppNextElem = (Fts3HashElem **)&pReader[1]; memcpy(pReader->ppNextElem, aElem, nElem*sizeof(Fts3HashElem *)); + fts3SegReaderNext(pReader); } } if( isPrefix ){ sqlite3_free(aElem); @@ -115769,11 +114218,11 @@ /* ** Add term zTerm to the SegmentNode. It is guaranteed that zTerm is larger ** (according to memcmp) than the previous term. */ static int fts3NodeAddTerm( - Fts3Table *p, /* Virtual table handle */ + Fts3Table *p, /* Virtual table handle */ SegmentNode **ppTree, /* IN/OUT: SegmentNode handle */ int isCopyTerm, /* True if zTerm/nTerm is transient */ const char *zTerm, /* Pointer to buffer containing term */ int nTerm /* Size of term in bytes */ ){ @@ -116399,18 +114848,19 @@ ** for, then advance each segment iterator until it points to a term of ** equal or greater value than the specified term. This prevents many ** unnecessary merge/sort operations for the case where single segment ** b-tree leaf nodes contain more than one term. 
*/ - for(i=0; izTerm ){ int nTerm = pFilter->nTerm; const char *zTerm = pFilter->zTerm; - Fts3SegReader *pSeg = apSegment[i]; - do { - rc = fts3SegReaderNext(p, pSeg); - if( rc!=SQLITE_OK ) goto finished; - }while( zTerm && fts3SegReaderTermCmp(pSeg, zTerm, nTerm)<0 ); + for(i=0; iaNode ){ int nTerm = apSegment[0]->nTerm; @@ -116515,11 +114965,11 @@ if( pFilter->zTerm && !isPrefix ){ goto finished; } for(i=0; inColumn+2; - if( *pRC ) return; - a = sqlite3_malloc( (sizeof(u32)+10)*nStat ); + a = sqlite3_malloc( (sizeof(u32)+10)*(p->nColumn+1) ); if( a==0 ){ *pRC = SQLITE_NOMEM; return; } - pBlob = (char*)&a[nStat]; + pBlob = (char*)&a[p->nColumn+1]; rc = fts3SqlStmt(p, SQL_SELECT_DOCTOTAL, &pStmt, 0); if( rc ){ sqlite3_free(a); *pRC = rc; return; } if( sqlite3_step(pStmt)==SQLITE_ROW ){ - fts3DecodeIntArray(nStat, a, + fts3DecodeIntArray(p->nColumn+1, a, sqlite3_column_blob(pStmt, 0), sqlite3_column_bytes(pStmt, 0)); }else{ - memset(a, 0, sizeof(u32)*(nStat) ); + memset(a, 0, sizeof(u32)*(p->nColumn+1) ); } sqlite3_reset(pStmt); if( nChng<0 && a[0]<(u32)(-nChng) ){ a[0] = 0; }else{ a[0] += nChng; } - for(i=0; inColumn+1; i++){ + for(i=0; inColumn; i++){ u32 x = a[i+1]; if( x+aSzIns[i] < aSzDel[i] ){ x = 0; }else{ x = x + aSzIns[i] - aSzDel[i]; } a[i+1] = x; } - fts3EncodeIntArray(nStat, a, pBlob, &nBlob); + fts3EncodeIntArray(p->nColumn+1, a, pBlob, &nBlob); rc = fts3SqlStmt(p, SQL_REPLACE_DOCTOTAL, &pStmt, 0); if( rc ){ sqlite3_free(a); *pRC = rc; return; @@ -116942,162 +115380,12 @@ #endif }else{ rc = SQLITE_ERROR; } - sqlite3Fts3SegmentsClose(p); - return rc; -} - -/* -** Return the deferred doclist associated with deferred token pDeferred. -** This function assumes that sqlite3Fts3CacheDeferredDoclists() has already -** been called to allocate and populate the doclist. -*/ -SQLITE_PRIVATE char *sqlite3Fts3DeferredDoclist(Fts3DeferredToken *pDeferred, int *pnByte){ - if( pDeferred->pList ){ - *pnByte = pDeferred->pList->nData; - return pDeferred->pList->aData; - } - *pnByte = 0; - return 0; -} - -/* -** Helper fucntion for FreeDeferredDoclists(). This function removes all -** references to deferred doclists from within the tree of Fts3Expr -** structures headed by -*/ -static void fts3DeferredDoclistClear(Fts3Expr *pExpr){ - if( pExpr ){ - fts3DeferredDoclistClear(pExpr->pLeft); - fts3DeferredDoclistClear(pExpr->pRight); - if( pExpr->isLoaded ){ - sqlite3_free(pExpr->aDoclist); - pExpr->isLoaded = 0; - pExpr->aDoclist = 0; - pExpr->nDoclist = 0; - pExpr->pCurrent = 0; - pExpr->iCurrent = 0; - } - } -} - -/* -** Delete all cached deferred doclists. Deferred doclists are cached -** (allocated) by the sqlite3Fts3CacheDeferredDoclists() function. -*/ -SQLITE_PRIVATE void sqlite3Fts3FreeDeferredDoclists(Fts3Cursor *pCsr){ - Fts3DeferredToken *pDef; - for(pDef=pCsr->pDeferred; pDef; pDef=pDef->pNext){ - sqlite3_free(pDef->pList); - pDef->pList = 0; - } - if( pCsr->pDeferred ){ - fts3DeferredDoclistClear(pCsr->pExpr); - } -} - -/* -** Free all entries in the pCsr->pDeffered list. Entries are added to -** this list using sqlite3Fts3DeferToken(). -*/ -SQLITE_PRIVATE void sqlite3Fts3FreeDeferredTokens(Fts3Cursor *pCsr){ - Fts3DeferredToken *pDef; - Fts3DeferredToken *pNext; - for(pDef=pCsr->pDeferred; pDef; pDef=pNext){ - pNext = pDef->pNext; - sqlite3_free(pDef->pList); - sqlite3_free(pDef); - } - pCsr->pDeferred = 0; -} - -/* -** Generate deferred-doclists for all tokens in the pCsr->pDeferred list -** based on the row that pCsr currently points to. 
-** -** A deferred-doclist is like any other doclist with position information -** included, except that it only contains entries for a single row of the -** table, not for all rows. -*/ -SQLITE_PRIVATE int sqlite3Fts3CacheDeferredDoclists(Fts3Cursor *pCsr){ - int rc = SQLITE_OK; /* Return code */ - if( pCsr->pDeferred ){ - int i; /* Used to iterate through table columns */ - sqlite3_int64 iDocid; /* Docid of the row pCsr points to */ - Fts3DeferredToken *pDef; /* Used to iterate through deferred tokens */ - - Fts3Table *p = (Fts3Table *)pCsr->base.pVtab; - sqlite3_tokenizer *pT = p->pTokenizer; - sqlite3_tokenizer_module const *pModule = pT->pModule; - - assert( pCsr->isRequireSeek==0 ); - iDocid = sqlite3_column_int64(pCsr->pStmt, 0); - - for(i=0; inColumn && rc==SQLITE_OK; i++){ - const char *zText = (const char *)sqlite3_column_text(pCsr->pStmt, i+1); - sqlite3_tokenizer_cursor *pTC = 0; - - rc = pModule->xOpen(pT, zText, -1, &pTC); - while( rc==SQLITE_OK ){ - char const *zToken; /* Buffer containing token */ - int nToken; /* Number of bytes in token */ - int iDum1, iDum2; /* Dummy variables */ - int iPos; /* Position of token in zText */ - - pTC->pTokenizer = pT; - rc = pModule->xNext(pTC, &zToken, &nToken, &iDum1, &iDum2, &iPos); - for(pDef=pCsr->pDeferred; pDef && rc==SQLITE_OK; pDef=pDef->pNext){ - Fts3PhraseToken *pPT = pDef->pToken; - if( (pDef->iCol>=p->nColumn || pDef->iCol==i) - && (pPT->n==nToken || (pPT->isPrefix && pPT->nz, pPT->n)) - ){ - fts3PendingListAppend(&pDef->pList, iDocid, i, iPos, &rc); - } - } - } - if( pTC ) pModule->xClose(pTC); - if( rc==SQLITE_DONE ) rc = SQLITE_OK; - } - - for(pDef=pCsr->pDeferred; pDef && rc==SQLITE_OK; pDef=pDef->pNext){ - if( pDef->pList ){ - rc = fts3PendingListAppendVarint(&pDef->pList, 0); - } - } - } - - return rc; -} - -/* -** Add an entry for token pToken to the pCsr->pDeferred list. -*/ -SQLITE_PRIVATE int sqlite3Fts3DeferToken( - Fts3Cursor *pCsr, /* Fts3 table cursor */ - Fts3PhraseToken *pToken, /* Token to defer */ - int iCol /* Column that token must appear in (or -1) */ -){ - Fts3DeferredToken *pDeferred; - pDeferred = sqlite3_malloc(sizeof(*pDeferred)); - if( !pDeferred ){ - return SQLITE_NOMEM; - } - memset(pDeferred, 0, sizeof(*pDeferred)); - pDeferred->pToken = pToken; - pDeferred->pNext = pCsr->pDeferred; - pDeferred->iCol = iCol; - pCsr->pDeferred = pDeferred; - - assert( pToken->pDeferred==0 ); - pToken->pDeferred = pDeferred; - - return SQLITE_OK; -} - + return rc; +} /* ** This function does the work for the xUpdate method of FTS3 virtual ** tables. */ @@ -117113,21 +115401,20 @@ sqlite3_int64 iRemove = 0; /* Rowid removed by UPDATE or DELETE */ u32 *aSzIns; /* Sizes of inserted documents */ u32 *aSzDel; /* Sizes of deleted documents */ int nChng = 0; /* Net change in number of documents */ - assert( p->pSegments==0 ); /* Allocate space to hold the change in document sizes */ - aSzIns = sqlite3_malloc( sizeof(aSzIns[0])*(p->nColumn+1)*2 ); + aSzIns = sqlite3_malloc( sizeof(aSzIns[0])*p->nColumn*2 ); if( aSzIns==0 ) return SQLITE_NOMEM; - aSzDel = &aSzIns[p->nColumn+1]; - memset(aSzIns, 0, sizeof(aSzIns[0])*(p->nColumn+1)*2); + aSzDel = &aSzIns[p->nColumn]; + memset(aSzIns, 0, sizeof(aSzIns[0])*p->nColumn*2); /* If this is a DELETE or UPDATE operation, remove the old record. */ if( sqlite3_value_type(apVal[0])!=SQLITE_NULL ){ - int isEmpty = 0; + int isEmpty; rc = fts3IsEmpty(p, apVal, &isEmpty); if( rc==SQLITE_OK ){ if( isEmpty ){ /* Deleting this row means the whole table is empty. 
In this case ** delete the contents of all three tables and throw away any @@ -117140,12 +115427,12 @@ rc = fts3PendingTermsDocid(p, iRemove); fts3DeleteTerms(&rc, p, apVal, aSzDel); fts3SqlExec(&rc, p, SQL_DELETE_CONTENT, apVal); if( p->bHasDocsize ){ fts3SqlExec(&rc, p, SQL_DELETE_DOCSIZE, apVal); + nChng--; } - nChng--; } } }else if( sqlite3_value_type(apVal[p->nColumn+2])!=SQLITE_NULL ){ sqlite3_free(aSzIns); return fts3SpecialInsert(p, apVal[p->nColumn+2]); @@ -117159,21 +115446,20 @@ } if( rc==SQLITE_OK ){ rc = fts3InsertTerms(p, apVal, aSzIns); } if( p->bHasDocsize ){ + nChng++; fts3InsertDocsize(&rc, p, aSzIns); } - nChng++; } - if( p->bHasStat ){ + if( p->bHasDocsize ){ fts3UpdateDocTotals(&rc, p, aSzIns, aSzDel, nChng); } sqlite3_free(aSzIns); - sqlite3Fts3SegmentsClose(p); return rc; } /* ** Flush any data in the pending-terms hash table to disk. If successful, @@ -117193,11 +115479,10 @@ }else{ sqlite3_exec(p->db, "ROLLBACK TO fts3", 0, 0, 0); sqlite3_exec(p->db, "RELEASE fts3", 0, 0, 0); } } - sqlite3Fts3SegmentsClose(p); return rc; } #endif @@ -117224,11 +115509,11 @@ ** Used as an fts3ExprIterate() context when loading phrase doclists to ** Fts3Expr.aDoclist[]/nDoclist. */ typedef struct LoadDoclistCtx LoadDoclistCtx; struct LoadDoclistCtx { - Fts3Cursor *pCsr; /* FTS3 Cursor */ + Fts3Table *pTab; /* FTS3 Table */ int nPhrase; /* Number of phrases seen so far */ int nToken; /* Number of tokens seen so far */ }; /* @@ -117418,11 +115703,11 @@ p->nPhrase++; p->nToken += pExpr->pPhrase->nToken; if( pExpr->isLoaded==0 ){ - rc = sqlite3Fts3ExprLoadDoclist(p->pCsr, pExpr); + rc = sqlite3Fts3ExprLoadDoclist(p->pTab, pExpr); pExpr->isLoaded = 1; if( rc==SQLITE_OK ){ rc = fts3ExprNearTrim(pExpr); } } @@ -117461,11 +115746,11 @@ int *pnPhrase, /* OUT: Number of phrases in query */ int *pnToken /* OUT: Number of tokens in query */ ){ int rc; /* Return Code */ LoadDoclistCtx sCtx = {0,0,0}; /* Context for fts3ExprIterate() */ - sCtx.pCsr = pCsr; + sCtx.pTab = (Fts3Table *)pCsr->base.pVtab; rc = fts3ExprIterate(pCsr->pExpr, fts3ExprLoadDoclistsCb1, (void *)&sCtx); if( rc==SQLITE_OK ){ (void)fts3ExprIterate(pCsr->pExpr, fts3ExprLoadDoclistsCb2, 0); } if( pnPhrase ) *pnPhrase = sCtx.nPhrase; @@ -117992,51 +116277,24 @@ Fts3Expr *pExpr, /* Phrase expression node */ int iPhrase, /* Phrase number (numbered from zero) */ void *pCtx /* Pointer to MatchInfo structure */ ){ MatchInfo *p = (MatchInfo *)pCtx; - Fts3Cursor *pCsr = p->pCursor; - char *pIter; + char *pCsr; char *pEnd; - char *pFree = 0; const int iStart = 2 + (iPhrase * p->nCol * 3) + 1; assert( pExpr->isLoaded ); - assert( pExpr->eType==FTSQUERY_PHRASE ); - - if( pCsr->pDeferred ){ - Fts3Phrase *pPhrase = pExpr->pPhrase; - int ii; - for(ii=0; iinToken; ii++){ - if( pPhrase->aToken[ii].bFulltext ) break; - } - if( iinToken ){ - int nFree = 0; - int rc = sqlite3Fts3ExprLoadFtDoclist(pCsr, pExpr, &pFree, &nFree); - if( rc!=SQLITE_OK ) return rc; - pIter = pFree; - pEnd = &pFree[nFree]; - }else{ - int nDoc = p->aMatchinfo[2 + 3*p->nCol*p->aMatchinfo[0]]; - for(ii=0; iinCol; ii++){ - p->aMatchinfo[iStart + ii*3] = nDoc; - p->aMatchinfo[iStart + ii*3 + 1] = nDoc; - } - return SQLITE_OK; - } - }else{ - pIter = pExpr->aDoclist; - pEnd = &pExpr->aDoclist[pExpr->nDoclist]; - } /* Fill in the global hit count matrix row for this phrase. 
*/ - while( pIteraMatchinfo[iStart], 1); + pCsr = pExpr->aDoclist; + pEnd = &pExpr->aDoclist[pExpr->nDoclist]; + while( pCsraMatchinfo[iStart], 1); } - sqlite3_free(pFree); return SQLITE_OK; } /* ** fts3ExprIterate() callback used to collect the "local" matchinfo stats @@ -118100,19 +116358,20 @@ sInfo.aMatchinfo = (u32 *)sqlite3_malloc(sizeof(u32)*nMatchinfo); if( !sInfo.aMatchinfo ){ return SQLITE_NOMEM; } memset(sInfo.aMatchinfo, 0, sizeof(u32)*nMatchinfo); + /* First element of match-info is the number of phrases in the query */ sInfo.aMatchinfo[0] = nPhrase; sInfo.aMatchinfo[1] = sInfo.nCol; + (void)fts3ExprIterate(pCsr->pExpr, fts3ExprGlobalMatchinfoCb,(void*)&sInfo); if( pTab->bHasDocsize ){ int ofst = 2 + 3*sInfo.aMatchinfo[0]*sInfo.aMatchinfo[1]; rc = sqlite3Fts3MatchinfoDocsizeGlobal(pCsr, &sInfo.aMatchinfo[ofst]); } - (void)fts3ExprIterate(pCsr->pExpr, fts3ExprGlobalMatchinfoCb,(void*)&sInfo); pCsr->aMatchinfo = sInfo.aMatchinfo; pCsr->isMatchinfoNeeded = 1; } sInfo.aMatchinfo = pCsr->aMatchinfo; @@ -118218,11 +116477,10 @@ i, (i==nSnippet-1), nFToken, zStart, zEnd, zEllipsis, &res ); } snippet_out: - sqlite3Fts3SegmentsClose(pTab); if( rc!=SQLITE_OK ){ sqlite3_result_error_code(pCtx, rc); sqlite3_free(res.z); }else{ sqlite3_result_text(pCtx, res.z, -1, sqlite3_free); @@ -118398,11 +116656,10 @@ } offsets_out: sqlite3_free(sCtx.aTerm); assert( rc!=SQLITE_DONE ); - sqlite3Fts3SegmentsClose(pTab); if( rc!=SQLITE_OK ){ sqlite3_result_error_code(pCtx, rc); sqlite3_free(res.z); }else{ sqlite3_result_text(pCtx, res.z, res.n-1, sqlite3_free); @@ -118418,11 +116675,10 @@ if( !pCsr->pExpr ){ sqlite3_result_blob(pContext, "", 0, SQLITE_STATIC); return; } rc = fts3GetMatchinfo(pCsr); - sqlite3Fts3SegmentsClose((Fts3Table *)pCsr->base.pVtab ); if( rc!=SQLITE_OK ){ sqlite3_result_error_code(pContext, rc); }else{ Fts3Table *pTab = (Fts3Table*)pCsr->base.pVtab; int n = sizeof(u32)*(2+pCsr->aMatchinfo[0]*pCsr->aMatchinfo[1]*3); Index: src/sqlite3.h ================================================================== --- src/sqlite3.h +++ src/sqlite3.h @@ -105,13 +105,13 @@ ** ** See also: [sqlite3_libversion()], ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.7.4" -#define SQLITE_VERSION_NUMBER 3007004 -#define SQLITE_SOURCE_ID "2010-11-16 23:10:26 fd5b2f23dd5111d2f0934dd828bae36b755024c1" +#define SQLITE_VERSION "3.7.3" +#define SQLITE_VERSION_NUMBER 3007003 +#define SQLITE_SOURCE_ID "2010-10-07 13:29:13 e55ada89246d4cc5f476891c70572dc7c1c3643e" /* ** CAPI3REF: Run-Time Library Version Numbers ** KEYWORDS: sqlite3_version, sqlite3_sourceid ** @@ -2631,25 +2631,10 @@ ** SQL text used to create a [prepared statement] if that statement was ** compiled using either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()]. */ SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt); -/* -** CAPI3REF: Determine If An SQL Statement Writes The Database -** -** ^The sqlite3_stmt_readonly(X) interface returns true (non-zero) if -** the [prepared statement] X is guaranteed to leave the database file -** unmodified. ^If the sqlite3_stmt_readonly(X) interface returns false (zero) -** then evaluating the statement might change the database file, but this -** is not guaranteed as the write operation might be conditional and the -** condition might not be met. ^If X is a NULL pointer then -** sqlite3_stmt_readonly(X) returns true. 
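A short usage sketch of the behavior documented here, using only the declared interface (error handling omitted; the SQL strings are arbitrary examples):

  #include <sqlite3.h>
  #include <stdio.h>

  static void reportReadonly(sqlite3 *db, const char *zSql){
    sqlite3_stmt *pStmt = 0;
    if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0)==SQLITE_OK ){
      printf("%s -> %s\n", zSql,
             sqlite3_stmt_readonly(pStmt) ? "read-only" : "may write");
      sqlite3_finalize(pStmt);
    }
  }

  /* reportReadonly(db, "SELECT count(*) FROM blob");  -- prints "read-only" */
  /* reportReadonly(db, "DELETE FROM vfile");          -- prints "may write" */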
If X is a non-NULL pointer but -** is not a pointer to a valid, unfinalized prepared statement, then the -** behavior is undefined and probably harmful. -*/ -SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt); - /* ** CAPI3REF: Dynamically Typed Value Object ** KEYWORDS: {protected sqlite3_value} {unprotected sqlite3_value} ** ** SQLite uses the sqlite3_value object to represent all values @@ -2745,14 +2730,11 @@ ** ^If the fourth parameter is negative, the length of the string is ** the number of bytes up to the first zero terminator. ** ** ^The fifth argument to sqlite3_bind_blob(), sqlite3_bind_text(), and ** sqlite3_bind_text16() is a destructor used to dispose of the BLOB or -** string after SQLite has finished with it. ^The destructor is called -** to dispose of the BLOB or string even if the call to sqlite3_bind_blob(), -** sqlite3_bind_text(), or sqlite3_bind_text16() fails. -** ^If the fifth argument is +** string after SQLite has finished with it. ^If the fifth argument is ** the special value [SQLITE_STATIC], then SQLite assumes that the ** information is in static, unmanaged space and does not need to be freed. ** ^If the fifth argument has the value [SQLITE_TRANSIENT], then ** SQLite makes its own private copy of the data immediately, before ** the sqlite3_bind_*() routine returns. @@ -3388,19 +3370,16 @@ ** parameters. ^An aggregate SQL function requires an implementation of xStep ** and xFinal and NULL pointer must be passed for xFunc. ^To delete an existing ** SQL function or aggregate, pass NULL poiners for all three function ** callbacks. ** -** ^(If the tenth parameter to sqlite3_create_function_v2() is not NULL, -** then it is destructor for the application data pointer. -** The destructor is invoked when the function is deleted, either by being -** overloaded or when the database connection closes.)^ -** ^The destructor is also invoked if the call to -** sqlite3_create_function_v2() fails. -** ^When the destructor callback of the tenth parameter is invoked, it -** is passed a single argument which is a copy of the application data -** pointer which was the fifth parameter to sqlite3_create_function_v2(). +** ^If the tenth parameter to sqlite3_create_function_v2() is not NULL, +** then it is invoked when the function is deleted, either by being +** overloaded or when the database connection closes. +** ^When the destructure callback of the tenth parameter is invoked, it +** is passed a single argument which is a copy of the pointer which was +** the fifth parameter to sqlite3_create_function_v2(). ** ** ^It is permitted to register multiple implementations of the same ** functions with the same name but with either differing numbers of ** arguments or differing preferred text encodings. ^SQLite will use ** the implementation that most closely matches the way in which the @@ -3860,19 +3839,10 @@ ** the collating function is deleted. ** ^Collating functions are deleted when they are overridden by later ** calls to the collation creation functions or when the ** [database connection] is closed using [sqlite3_close()]. ** -** ^The xDestroy callback is not called if the -** sqlite3_create_collation_v2() function fails. Applications that invoke -** sqlite3_create_collation_v2() with a non-NULL xDestroy argument should -** check the return code and dispose of the application data pointer -** themselves rather than expecting SQLite to deal with it for them. -** This is different from every other SQLite interface. 
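The caller-side handling that this note asks for looks roughly like the following sketch; myCompare, pUserData and the collation name are placeholders invented for the example.

  #include <sqlite3.h>
  #include <stdlib.h>
  #include <string.h>

  static int myCompare(void *pUser, int n1, const void *z1, int n2, const void *z2){
    int n = n1<n2 ? n1 : n2;           /* placeholder comparison; ignores pUser */
    int c = memcmp(z1, z2, n);
    return c ? c : n1-n2;
  }

  static int registerCollation(sqlite3 *db){
    void *pUserData = malloc(16);      /* application data owned by the collation */
    int rc = sqlite3_create_collation_v2(db, "mycoll", SQLITE_UTF8,
                                         pUserData, myCompare, free);
    if( rc!=SQLITE_OK ){
      free(pUserData);   /* xDestroy is not invoked on failure; clean up here */
    }
    return rc;
  }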
The inconsistency -** is unfortunate but cannot be changed without breaking backwards -** compatibility. -** ** See also: [sqlite3_collation_needed()] and [sqlite3_collation_needed16()]. */ SQLITE_API int sqlite3_create_collation( sqlite3*, const char *zName, @@ -4623,13 +4593,11 @@ ** when a new virtual table is be being created or reinitialized. ** ** ^The sqlite3_create_module_v2() interface has a fifth parameter which ** is a pointer to a destructor for the pClientData. ^SQLite will ** invoke the destructor function (if it is not NULL) when SQLite -** no longer needs the pClientData pointer. ^The destructor will also -** be invoked if the call to sqlite3_create_module_v2() fails. -** ^The sqlite3_create_module() +** no longer needs the pClientData pointer. ^The sqlite3_create_module() ** interface is equivalent to sqlite3_create_module_v2() with a NULL ** destructor. */ SQLITE_API int sqlite3_create_module( sqlite3 *db, /* SQLite connection to register module with */ @@ -4808,33 +4776,10 @@ sqlite3_int64 iRow, int flags, sqlite3_blob **ppBlob ); -/* -** CAPI3REF: Move a BLOB Handle to a New Row -** -** ^This function is used to move an existing blob handle so that it points -** to a different row of the same database table. ^The new row is identified -** by the rowid value passed as the second argument. Only the row can be -** changed. ^The database, table and column on which the blob handle is open -** remain the same. Moving an existing blob handle to a new row can be -** faster than closing the existing handle and opening a new one. -** -** ^(The new row must meet the same criteria as for [sqlite3_blob_open()] - -** it must exist and there must be either a blob or text value stored in -** the nominated column.)^ ^If the new row is not present in the table, or if -** it does not contain a blob or text value, or if another error occurs, an -** SQLite error code is returned and the blob handle is considered aborted. -** ^All subsequent calls to [sqlite3_blob_read()], [sqlite3_blob_write()] or -** [sqlite3_blob_reopen()] on an aborted blob handle immediately return -** SQLITE_ABORT. -** -** ^This function sets the database handle error code and message. -*/ -SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64); - /* ** CAPI3REF: Close A BLOB Handle ** ** ^Closes an open [BLOB handle]. ** Index: src/style.c ================================================================== --- src/style.c +++ src/style.c @@ -89,11 +89,11 @@ va_end(ap); cgi_destination(CGI_HEADER); cgi_printf("%s", ""); + " \"http://www.x3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">"); if( g.thTrace ) Th_Trace("BEGIN_HEADER
\n", -1); /* Generate the header up through the main menu */ Th_Store("project_name", db_get("project-name","Unnamed Fossil Project")); Index: src/sync.c ================================================================== --- src/sync.c +++ src/sync.c @@ -32,43 +32,39 @@ /* ** If the respository is configured for autosyncing, then do an ** autosync. This will be a pull if the argument is true or a push ** if the argument is false. -** -** Return the number of errors. */ -int autosync(int flags){ +void autosync(int flags){ const char *zUrl; const char *zAutosync; const char *zPw; - int rc; int configSync = 0; /* configuration changes transferred */ if( g.fNoSync ){ - return 0; + return; } zAutosync = db_get("autosync", 0); if( zAutosync ){ if( (flags & AUTOSYNC_PUSH)!=0 && memcmp(zAutosync,"pull",4)==0 ){ - return 0; /* Do not auto-push when autosync=pullonly */ + return; /* Do not auto-push when autosync=pullonly */ } if( is_false(zAutosync) ){ - return 0; /* Autosync is completely off */ + return; /* Autosync is completely off */ } }else{ /* Autosync defaults on. To make it default off, "return" here. */ } zUrl = db_get("last-sync-url", 0); if( zUrl==0 ){ - return 0; /* No default server */ + return; /* No default server */ } zPw = unobscure(db_get("last-sync-pw", 0)); url_parse(zUrl); if( g.urlUser!=0 && g.urlPasswd==0 ){ g.urlPasswd = mprintf("%s", zPw); } -#if 0 /* Disabled for now */ if( (flags & AUTOSYNC_PULL)!=0 && db_get_boolean("auto-shun",1) ){ /* When doing an automatic pull, also automatically pull shuns from ** the server if pull_shuns is enabled. ** ** TODO: What happens if the shun list gets really big? @@ -75,16 +71,13 @@ ** Maybe the shunning list should only be pulled on every 10th ** autosync, or something? */ configSync = CONFIGSET_SHUN; } -#endif printf("Autosync: %s\n", g.urlCanonical); url_enable_proxy("via proxy: "); - rc = client_sync((flags & AUTOSYNC_PUSH)!=0, 1, 0, configSync, 0); - if( rc ) fossil_warning("Autosync failed"); - return rc; + client_sync((flags & AUTOSYNC_PUSH)!=0, 1, 0, configSync, 0); } /* ** This routine processes the command-line argument for push, pull, ** and sync. If a command-line argument is given, that is the URL @@ -110,21 +103,21 @@ if( zUrl==0 ){ if( urlOptional ) fossil_exit(0); usage("URL"); } url_parse(zUrl); + if( !g.dontKeepUrl ){ + db_set("last-sync-url", g.urlCanonical, 0); + if( g.urlPasswd ) db_set("last-sync-pw", obscure(g.urlPasswd), 0); + } if( g.urlUser!=0 && g.urlPasswd==0 ){ if( zPw==0 ){ url_prompt_for_password(); }else{ g.urlPasswd = mprintf("%s", zPw); } } - if( !g.dontKeepUrl ){ - db_set("last-sync-url", g.urlCanonical, 0); - if( g.urlPasswd ) db_set("last-sync-pw", obscure(g.urlPasswd), 0); - } user_select(); if( g.argc==2 ){ printf("Server: %s\n", g.urlCanonical); } url_enable_proxy("via proxy: "); Index: src/timeline.c ================================================================== --- src/timeline.c +++ src/timeline.c @@ -196,11 +196,11 @@ ** a technical div for the timeline graph */ @
} - @ + @
blob_zero(&comment); while( db_step(pQuery)==SQLITE_ROW ){ int rid = db_column_int(pQuery, 0); const char *zUuid = db_column_text(pQuery, 1); int isLeaf = db_column_int(pQuery, 5); @@ -488,11 +488,11 @@ @ function renderGraph(){ @ var canvasDiv = document.getElementById("canvas"); @ while( canvasDiv.hasChildNodes() ){ @ canvasDiv.removeChild(canvasDiv.firstChild); @ } - @ var canvasY = absoluteY("timelineTable"); + @ var canvasY = absoluteY("canvas"); @ var left = absoluteX(rowinfo[0].id) - absoluteX("canvas") + 15; @ var width = nrail*20; @ for(var i in rowinfo){ @ rowinfo[i].y = absoluteY(rowinfo[i].id) + 10 - canvasY; @ rowinfo[i].x = left + rowinfo[i].r*20; @@ -552,12 +552,11 @@ @ isleaf BOOLEAN, @ bgcolor TEXT, @ etype TEXT, @ taglist TEXT, @ tagid INTEGER, - @ short TEXT, - @ sortby REAL + @ short TEXT @ ) ; db_multi_exec(zSql); } @@ -584,12 +583,11 @@ @ event.type, @ (SELECT group_concat(substr(tagname,5), ', ') FROM tag, tagxref @ WHERE tagname GLOB 'sym-*' AND tag.tagid=tagxref.tagid @ AND tagxref.rid=blob.rid AND tagxref.tagtype>0), @ tagid, - @ brief, - @ event.mtime + @ brief @ FROM event JOIN blob @ WHERE blob.rid=event.objid ; if( zBase==0 ){ zBase = mprintf(zBaseSql, TAG_BRANCH, TAG_BRANCH); @@ -617,17 +615,17 @@ ** "timeline" table to cause
to be inserted before and after ** entries of that date. */ static void timeline_add_dividers(const char *zDate){ db_multi_exec( - "INSERT INTO timeline(rid,sortby,etype)" - "VALUES(-1,julianday(%Q,'utc')-5.0e-6,'div')", + "INSERT INTO timeline(rid,timestamp,etype)" + "VALUES(-1,datetime(%Q,'-1 second') || '.9','div')", zDate ); db_multi_exec( - "INSERT INTO timeline(rid,sortby,etype)" - "VALUES(-2,julianday(%Q,'utc')+5.0e-6,'div')", + "INSERT INTO timeline(rid,timestamp,etype)" + "VALUES(-2,datetime(%Q) || '.1','div')", zDate ); } @@ -949,11 +947,11 @@ } if( P("showsql") ){ @
%h(blob_str(&sql))
  }
  blob_zero(&sql);
- db_prepare(&q, "SELECT * FROM timeline ORDER BY sortby DESC /*scan*/");
+ db_prepare(&q, "SELECT * FROM timeline ORDER BY timestamp DESC /*scan*/");
  @

%b(&desc)

blob_reset(&desc); www_print_timeline(&q, tmFlags, 0); db_finalize(&q); style_footer(); @@ -1047,12 +1045,11 @@ @ FROM tag, tagxref @ WHERE tagname GLOB 'sym-*' AND tag.tagid=tagxref.tagid @ AND tagxref.rid=blob.rid AND tagxref.tagtype>0)) @ || ')', @ (SELECT count(*) FROM plink WHERE pid=blob.rid AND isprim), - @ (SELECT count(*) FROM plink WHERE cid=blob.rid), - @ event.mtime + @ (SELECT count(*) FROM plink WHERE cid=blob.rid) @ FROM event, blob @ WHERE blob.rid=event.objid ; return zBaseSql; } Index: src/tkt.c ================================================================== --- src/tkt.c +++ src/tkt.c @@ -703,11 +703,11 @@ timeline_query_for_www(), tagid, zFullUuid, zFullUuid, zFullUuid ); } db_prepare(&q, zSQL); free(zSQL); - www_print_timeline(&q, TIMELINE_ARTID|TIMELINE_DISJOINT|TIMELINE_GRAPH, 0); + www_print_timeline(&q, TIMELINE_ARTID, 0); db_finalize(&q); style_footer(); } /* @@ -845,11 +845,10 @@ ** %fossil ticket show (REPORTTITLE|REPORTNR) ?TICKETFILTER? ?options? ** ** options can be: ** ?-l|--limit LIMITCHAR? ** ?-q|--quote? -** ?-R|--repository FILE? ** ** Run the ticket report, identified by the report format title ** used in the gui. The data is written as flat file on stdout, ** using "," as separator. The seperator "," can be changed using ** the -l or --limit option. @@ -901,10 +900,11 @@ */ void ticket_cmd(void){ int n; /* do some ints, we want to be inside a checkout */ + db_must_be_within_tree(); db_find_and_open_repository(1); user_select(); /* ** Check that the user exists. */ Index: src/update.c ================================================================== --- src/update.c +++ src/update.c @@ -43,13 +43,11 @@ ** leaf. VERSION can also be "current" to select the leaf of the current ** version or "latest" to select the most recent check-in. ** ** If one or more FILES are listed after the VERSION then only the ** named files are candidates to be updated. If FILES is omitted, all -** files in the current checkout are subject to be updated. Using -** a directory name for one of the FILES arguments is the same as -** using every subdirectory and file beneath that directory. +** files in the current checkout are subject to be updated. ** ** The -n or --nochange option causes this command to do a "dry run". It ** prints out what would have happened but does not actually make any ** changes to the current checkout or the repository. ** @@ -71,11 +69,11 @@ db_must_be_within_tree(); vid = db_lget_int("checkout", 0); if( vid==0 ){ fossil_fatal("cannot find current version"); } - if( !nochangeFlag && db_exists("SELECT 1 FROM vmerge") ){ + if( db_exists("SELECT 1 FROM vmerge") ){ fossil_fatal("cannot update an uncommitted merge"); } if( !nochangeFlag ) autosync(AUTOSYNC_PULL); if( g.argc>=3 ){ @@ -113,11 +111,11 @@ tid = db_int(0, "SELECT rid FROM leaves, event" " WHERE event.objid=leaves.rid" " ORDER BY event.mtime DESC"); } - if( !verboseFlag && (tid==vid)) return; /* Nothing to update */ + if( tid==vid ) return; /* Nothing to update */ db_begin_transaction(); vfile_check_signature(vid, 1); if( !nochangeFlag ) undo_begin(); load_vfile_from_rid(tid); @@ -168,37 +166,27 @@ ); } db_finalize(&q); /* If FILES appear on the command-line, remove from the "fv" table - ** every entry that is not named on the command-line or which is not - ** in a directory named on the command-line. + ** every entry that is not named on the command-line. 
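The key to composing that NOT IN (...) list safely is the %q conversion used in the code below, which doubles any single quotes embedded in a filename. The same formatter is available through sqlite3_mprintf(), so an equivalent standalone sketch looks like this (the fv/fn names match the code below; everything else is illustrative):

  #include <sqlite3.h>

  /* Compose: DELETE FROM fv WHERE fn NOT IN ('name1','name2',...) */
  static char *composeDelete(char **azName, int nName){
    int i;
    char *zSql = sqlite3_mprintf("DELETE FROM fv WHERE fn NOT IN (");
    for(i=0; zSql && i<nName; i++){
      /* %z frees the previous string, %q escapes embedded single quotes */
      zSql = sqlite3_mprintf("%z%s'%q'", zSql, i ? "," : "", azName[i]);
    }
    if( zSql ) zSql = sqlite3_mprintf("%z)", zSql);
    return zSql;                       /* caller releases with sqlite3_free() */
  }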
*/ if( g.argc>=4 ){ Blob sql; /* SQL statement to purge unwanted entries */ + char *zSep = "("; /* Separator in the list of filenames */ Blob treename; /* Normalized filename */ int i; /* Loop counter */ - const char *zSep; /* Term separator */ blob_zero(&sql); - blob_append(&sql, "DELETE FROM fv WHERE ", -1); - zSep = ""; + blob_append(&sql, "DELETE FROM fv WHERE fn NOT IN ", -1); for(i=3; i%B ", zSep, &treename); - } - zSep = "AND "; + blob_appendf(&sql, "%s'%q'", zSep, blob_str(&treename)); blob_reset(&treename); + zSep = ","; } + blob_append(&sql, ")", -1); db_multi_exec(blob_str(&sql)); blob_reset(&sql); } db_prepare(&q, @@ -276,21 +264,15 @@ blob_reset(&v); blob_reset(&e); blob_reset(&t); blob_reset(&r); }else if( verboseFlag ){ - if( chnged ){ - printf("EDITED %s\n", zName); - }else{ - printf("UNCHANGED %s\n", zName); - } + printf("UNCHANGED %s\n", zName); } free(zFullPath); } db_finalize(&q); - printf("--------------\n"); - show_common_info(tid, "updated-to:", 1, 0); /* ** Clean up the mid and pid VFILE entries. Then commit the changes. */ if( nochangeFlag ){ Index: src/vfile.c ================================================================== --- src/vfile.c +++ src/vfile.c @@ -25,28 +25,10 @@ #include "dirent.h" #else #include #endif -/* -** The input is guaranteed to be a 40-character well-formed UUID. -** Find its rid. -*/ -int fast_uuid_to_rid(const char *zUuid){ - static Stmt q; - int rid; - db_static_prepare(&q, "SELECT rid FROM blob WHERE uuid=:uuid"); - db_bind_text(&q, ":uuid", zUuid); - if( db_step(&q)==SQLITE_ROW ){ - rid = db_column_int(&q, 0); - }else{ - rid = 0; - } - db_reset(&q); - return rid; -} - /* ** Given a UUID, return the corresponding record ID. If the UUID ** does not exist, then return 0. ** ** For this routine, the UUID must be exact. For a match against @@ -55,19 +37,27 @@ ** If the UUID is not found and phantomize is 1, then attempt to ** create a phantom record. */ int uuid_to_rid(const char *zUuid, int phantomize){ int rid, sz; + static Stmt q; char z[UUID_SIZE+1]; sz = strlen(zUuid); if( sz!=UUID_SIZE || !validate16(zUuid, sz) ){ return 0; } strcpy(z, zUuid); canonical16(z, sz); - rid = fast_uuid_to_rid(z); + db_static_prepare(&q, "SELECT rid FROM blob WHERE uuid=:uuid"); + db_bind_text(&q, ":uuid", z); + if( db_step(&q)==SQLITE_ROW ){ + rid = db_column_int(&q, 0); + }else{ + rid = 0; + } + db_reset(&q); if( rid==0 && phantomize ){ rid = content_new(zUuid); } return rid; } @@ -388,57 +378,10 @@ } db_finalize(&q); md5sum_finish(pOut); } -/* -** Do a file-by-file comparison of the content of the repository and -** the working check-out on disk. Report any errors. 
-*/ -void vfile_compare_repository_to_disk(int vid){ - int rc; - Stmt q; - Blob disk, repo; - - db_must_be_within_tree(); - db_prepare(&q, - "SELECT %Q || pathname, pathname, rid FROM vfile" - " WHERE NOT deleted AND vid=%d AND file_is_selected(id)", - g.zLocalRoot, vid - ); - md5sum_init(); - while( db_step(&q)==SQLITE_ROW ){ - const char *zFullpath = db_column_text(&q, 0); - const char *zName = db_column_text(&q, 1); - int rid = db_column_int(&q, 2); - - blob_zero(&disk); - rc = blob_read_from_file(&disk, zFullpath); - if( rc<0 ){ - printf("ERROR: cannot read file [%s]\n", zFullpath); - blob_reset(&disk); - continue; - } - blob_zero(&repo); - content_get(rid, &repo); - if( blob_size(&repo)!=blob_size(&disk) ){ - printf("ERROR: [%s] is %d bytes on disk but %d in the repository\n", - zName, blob_size(&disk), blob_size(&repo)); - blob_reset(&disk); - blob_reset(&repo); - continue; - } - if( blob_compare(&repo, &disk) ){ - printf("ERROR: [%s] is different on disk compared to the repository\n", - zName); - } - blob_reset(&disk); - blob_reset(&repo); - } - db_finalize(&q); -} - /* ** Compute an aggregate MD5 checksum over the repository image of every ** file in vid. The file names are part of the checksum. ** ** Return the resulting checksum in blob pOut. Index: src/wiki.c ================================================================== --- src/wiki.c +++ src/wiki.c @@ -86,14 +86,14 @@ if( !g.okRdWiki ){ cgi_redirectf("%s/login?g=%s/home", g.zBaseURL, g.zBaseURL); } if( zIndexPage ){ const char *zPathInfo = P("PATH_INFO"); - while( zIndexPage[0]=='/' ) zIndexPage++; if( strcmp(zIndexPage, zPathInfo)==0 ) zIndexPage = 0; } if( zIndexPage ){ + while( zIndexPage[0]=='/' ) zIndexPage++; cgi_redirectf("%s/%s", g.zBaseURL, zIndexPage); } if( zPageName ){ login_check_credentials(); g.zExtra = zPageName; Index: src/winhttp.c ================================================================== --- src/winhttp.c +++ src/winhttp.c @@ -106,14 +106,14 @@ wanted -= got; } fclose(out); out = 0; sprintf(zCmd, "\"%s\" http \"%s\" %s %s %s%s", - fossil_nameofexe(), g.zRepositoryName, zRequestFName, zReplyFName, + _pgmptr, g.zRepositoryName, zRequestFName, zReplyFName, inet_ntoa(p->addr.sin_addr), p->zNotFound ); - fossil_system(zCmd); + portable_system(zCmd); in = fopen(zReplyFName, "rb"); if( in ){ while( (got = fread(zHdr, 1, sizeof(zHdr), in))>0 ){ send(p->s, zHdr, got, 0); } @@ -190,11 +190,11 @@ zTempPrefix = mprintf("fossil_server_P%d_", iPort); printf("Listening for HTTP requests on TCP port %d\n", iPort); if( zBrowser ){ zBrowser = mprintf(zBrowser, iPort); printf("Launch webbrowser: %s\n", zBrowser); - fossil_system(zBrowser); + portable_system(zBrowser); } printf("Type Ctrl-C to stop the HTTP server\n"); for(;;){ SOCKET client; SOCKADDR_IN client_addr; Index: src/xfer.c ================================================================== --- src/xfer.c +++ src/xfer.c @@ -454,10 +454,15 @@ fprintf(stderr, "# login [%s] with capabilities [%s]\n", g.zLogin,zCap); } } } db_finalize(&q); + + if( rc==0 ){ + /* If the login was successful. */ + login_set_anon_nobody_capabilities(); + } return rc; } /* ** Send the content of all files in the unsent table. 
@@ -626,12 +631,10 @@ char *zNow; if( strcmp(PD("REQUEST_METHOD","POST"),"POST") ){ fossil_redirect_home(); } - g.zLogin = "anonymous"; - login_set_anon_nobody_capabilities(); memset(&xfer, 0, sizeof(xfer)); blobarray_zero(xfer.aToken, count(xfer.aToken)); cgi_set_content_type(g.zContentType); blob_zero(&xfer.err); xfer.pIn = &g.cgiIn; @@ -975,11 +978,11 @@ ** ** Records are pushed to the server if pushFlag is true. Records ** are pulled if pullFlag is true. A full sync occurs if both are ** true. */ -int client_sync( +void client_sync( int pushFlag, /* True to do a push (or a sync) */ int pullFlag, /* True to do a pull (or a sync) */ int cloneFlag, /* True if this is a clone */ int configRcvMask, /* Receive these configuration items */ int configSendMask /* Send these configuration items */ @@ -1002,15 +1005,14 @@ int pctDone; /* Percentage done with a message */ int lastPctDone = -1; /* Last displayed pctDone */ double rArrivalTime; /* Time at which a message arrived */ const char *zSCode = db_get("server-code", "x"); const char *zPCode = db_get("project-code", 0); - int nErr = 0; /* Number of errors */ if( db_get_boolean("dont-push", 0) ) pushFlag = 0; if( pushFlag + pullFlag + cloneFlag == 0 - && configRcvMask==0 && configSendMask==0 ) return 0; + && configRcvMask==0 && configSendMask==0 ) return; transport_stats(0, 0, 1); socket_global_init(); memset(&xfer, 0, sizeof(xfer)); xfer.pIn = &recv; @@ -1127,14 +1129,11 @@ xfer.nIGotSent = 0; if( !g.cgiOutput && !g.fQuiet ){ printf("waiting for server..."); } fflush(stdout); - if( http_exchange(&send, &recv, cloneFlag==0 || nCycle>0) ){ - nErr++; - break; - } + http_exchange(&send, &recv, cloneFlag==0 || nCycle>0); lastPctDone = -1; blob_reset(&send); rArrivalTime = db_double(0.0, "SELECT julianday('now')"); /* Begin constructing the next message (which might never be @@ -1356,33 +1355,27 @@ go = 1; } }else{ blob_appendf(&xfer.err, "\rserver says: %s", zMsg); } - fossil_warning("\rError: %s", zMsg); - nErr++; - break; + fossil_fatal("\rError: %s", zMsg); } }else /* Unknown message */ { if( blob_str(&xfer.aToken[0])[0]=='<' ){ - fossil_warning( + fossil_fatal( "server replies with HTML instead of fossil sync protocol:\n%b", &recv ); - nErr++; - break; } blob_appendf(&xfer.err, "unknown command: %b", &xfer.aToken[0]); } if( blob_size(&xfer.err) ){ - fossil_warning("%b", &xfer.err); - nErr++; - break; + fossil_fatal("%b", &xfer.err); } blobarray_reset(xfer.aToken, xfer.nToken); blob_reset(&xfer.line); } if( origConfigRcvMask & (CONFIGSET_TKT|CONFIGSET_USER) ){ @@ -1433,7 +1426,6 @@ transport_global_shutdown(); db_multi_exec("DROP TABLE onremote"); manifest_crosslink_end(); content_enable_dephantomize(1); db_end_transaction(0); - return nErr; } Index: win/Makefile.dmc ================================================================== --- win/Makefile.dmc +++ win/Makefile.dmc @@ -24,13 +24,13 @@ CFLAGS = -o BCC = $(DMDIR)\bin\dmc $(CFLAGS) TCC = $(DMDIR)\bin\dmc $(CFLAGS) $(DMCDEF) $(I18N) $(SSL) $(INCL) LIBS = $(DMDIR)\extra\lib\ zlib wsock32 -SRC = add_.c allrepo_.c attach_.c bag_.c blob_.c branch_.c browse_.c captcha_.c cgi_.c checkin_.c checkout_.c clearsign_.c clone_.c comformat_.c configure_.c content_.c db_.c delta_.c deltacmd_.c descendants_.c diff_.c diffcmd_.c doc_.c encode_.c event_.c export_.c file_.c finfo_.c graph_.c http_.c http_socket_.c http_ssl_.c http_transport_.c info_.c import_.c login_.c main_.c manifest_.c md5_.c merge_.c merge3_.c name_.c pivot_.c popen_.c pqueue_.c printf_.c rebuild_.c report_.c rss_.c schema_.c search_.c 
setup_.c sha1_.c shun_.c skins_.c stat_.c style_.c sync_.c tag_.c th_main_.c timeline_.c tkt_.c tktsetup_.c undo_.c update_.c url_.c user_.c verify_.c vfile_.c wiki_.c wikiformat_.c winhttp_.c xfer_.c zip_.c +SRC = add_.c allrepo_.c attach_.c bag_.c blob_.c branch_.c browse_.c captcha_.c cgi_.c checkin_.c checkout_.c clearsign_.c clone_.c comformat_.c configure_.c content_.c db_.c delta_.c deltacmd_.c descendants_.c diff_.c diffcmd_.c doc_.c encode_.c event_.c file_.c finfo_.c graph_.c http_.c http_socket_.c http_ssl_.c http_transport_.c info_.c login_.c main_.c manifest_.c md5_.c merge_.c merge3_.c name_.c pivot_.c popen_.c pqueue_.c printf_.c rebuild_.c report_.c rss_.c schema_.c search_.c setup_.c sha1_.c shun_.c skins_.c stat_.c style_.c sync_.c tag_.c th_main_.c timeline_.c tkt_.c tktsetup_.c undo_.c update_.c url_.c user_.c verify_.c vfile_.c wiki_.c wikiformat_.c winhttp_.c xfer_.c zip_.c -OBJ = $(OBJDIR)\add$O $(OBJDIR)\allrepo$O $(OBJDIR)\attach$O $(OBJDIR)\bag$O $(OBJDIR)\blob$O $(OBJDIR)\branch$O $(OBJDIR)\browse$O $(OBJDIR)\captcha$O $(OBJDIR)\cgi$O $(OBJDIR)\checkin$O $(OBJDIR)\checkout$O $(OBJDIR)\clearsign$O $(OBJDIR)\clone$O $(OBJDIR)\comformat$O $(OBJDIR)\configure$O $(OBJDIR)\content$O $(OBJDIR)\db$O $(OBJDIR)\delta$O $(OBJDIR)\deltacmd$O $(OBJDIR)\descendants$O $(OBJDIR)\diff$O $(OBJDIR)\diffcmd$O $(OBJDIR)\doc$O $(OBJDIR)\encode$O $(OBJDIR)\event$O $(OBJDIR)\export$O $(OBJDIR)\file$O $(OBJDIR)\finfo$O $(OBJDIR)\graph$O $(OBJDIR)\http$O $(OBJDIR)\http_socket$O $(OBJDIR)\http_ssl$O $(OBJDIR)\http_transport$O $(OBJDIR)\import$O $(OBJDIR)\info$O $(OBJDIR)\login$O $(OBJDIR)\main$O $(OBJDIR)\manifest$O $(OBJDIR)\md5$O $(OBJDIR)\merge$O $(OBJDIR)\merge3$O $(OBJDIR)\name$O $(OBJDIR)\pivot$O $(OBJDIR)\popen$O $(OBJDIR)\pqueue$O $(OBJDIR)\printf$O $(OBJDIR)\rebuild$O $(OBJDIR)\report$O $(OBJDIR)\rss$O $(OBJDIR)\schema$O $(OBJDIR)\search$O $(OBJDIR)\setup$O $(OBJDIR)\sha1$O $(OBJDIR)\shun$O $(OBJDIR)\skins$O $(OBJDIR)\stat$O $(OBJDIR)\style$O $(OBJDIR)\sync$O $(OBJDIR)\tag$O $(OBJDIR)\th_main$O $(OBJDIR)\timeline$O $(OBJDIR)\tkt$O $(OBJDIR)\tktsetup$O $(OBJDIR)\undo$O $(OBJDIR)\update$O $(OBJDIR)\url$O $(OBJDIR)\user$O $(OBJDIR)\verify$O $(OBJDIR)\vfile$O $(OBJDIR)\wiki$O $(OBJDIR)\wikiformat$O $(OBJDIR)\winhttp$O $(OBJDIR)\xfer$O $(OBJDIR)\zip$O $(OBJDIR)\sqlite3$O $(OBJDIR)\shell$O $(OBJDIR)\th$O $(OBJDIR)\th_lang$O +OBJ = $(OBJDIR)\add$O $(OBJDIR)\allrepo$O $(OBJDIR)\attach$O $(OBJDIR)\bag$O $(OBJDIR)\blob$O $(OBJDIR)\branch$O $(OBJDIR)\browse$O $(OBJDIR)\captcha$O $(OBJDIR)\cgi$O $(OBJDIR)\checkin$O $(OBJDIR)\checkout$O $(OBJDIR)\clearsign$O $(OBJDIR)\clone$O $(OBJDIR)\comformat$O $(OBJDIR)\configure$O $(OBJDIR)\content$O $(OBJDIR)\db$O $(OBJDIR)\delta$O $(OBJDIR)\deltacmd$O $(OBJDIR)\descendants$O $(OBJDIR)\diff$O $(OBJDIR)\diffcmd$O $(OBJDIR)\doc$O $(OBJDIR)\encode$O $(OBJDIR)\event$O $(OBJDIR)\file$O $(OBJDIR)\finfo$O $(OBJDIR)\graph$O $(OBJDIR)\http$O $(OBJDIR)\http_socket$O $(OBJDIR)\http_ssl$O $(OBJDIR)\http_transport$O $(OBJDIR)\info$O $(OBJDIR)\login$O $(OBJDIR)\main$O $(OBJDIR)\manifest$O $(OBJDIR)\md5$O $(OBJDIR)\merge$O $(OBJDIR)\merge3$O $(OBJDIR)\name$O $(OBJDIR)\pivot$O $(OBJDIR)\popen$O $(OBJDIR)\pqueue$O $(OBJDIR)\printf$O $(OBJDIR)\rebuild$O $(OBJDIR)\report$O $(OBJDIR)\rss$O $(OBJDIR)\schema$O $(OBJDIR)\search$O $(OBJDIR)\setup$O $(OBJDIR)\sha1$O $(OBJDIR)\shun$O $(OBJDIR)\skins$O $(OBJDIR)\stat$O $(OBJDIR)\style$O $(OBJDIR)\sync$O $(OBJDIR)\tag$O $(OBJDIR)\th_main$O $(OBJDIR)\timeline$O $(OBJDIR)\tkt$O $(OBJDIR)\tktsetup$O $(OBJDIR)\undo$O $(OBJDIR)\update$O 
$(OBJDIR)\url$O $(OBJDIR)\user$O $(OBJDIR)\verify$O $(OBJDIR)\vfile$O $(OBJDIR)\wiki$O $(OBJDIR)\wikiformat$O $(OBJDIR)\winhttp$O $(OBJDIR)\xfer$O $(OBJDIR)\zip$O $(OBJDIR)\sqlite3$O $(OBJDIR)\th$O $(OBJDIR)\th_lang$O RC=$(DMDIR)\bin\rcc RCFLAGS=-32 -w1 -I$(SRCDIR) /D__DMC__ APPNAME = $(OBJDIR)\fossil$(E) @@ -43,11 +43,11 @@ fossil.res: $B\win\fossil.rc $(RC) $(RCFLAGS) -o$@ $** $(OBJDIR)\link: $B\win\Makefile.dmc - +echo add allrepo attach bag blob branch browse captcha cgi checkin checkout clearsign clone comformat configure content db delta deltacmd descendants diff diffcmd doc encode event export file finfo graph http http_socket http_ssl http_transport import info login main manifest md5 merge merge3 name pivot popen pqueue printf rebuild report rss schema search setup sha1 shun skins stat style sync tag th_main timeline tkt tktsetup undo update url user verify vfile wiki wikiformat winhttp xfer zip sqlite3 shell th th_lang > $@ + +echo add allrepo attach bag blob branch browse captcha cgi checkin checkout clearsign clone comformat configure content db delta deltacmd descendants diff diffcmd doc encode event file finfo graph http http_socket http_ssl http_transport info login main manifest md5 merge merge3 name pivot popen pqueue printf rebuild report rss schema search setup sha1 shun skins stat style sync tag th_main timeline tkt tktsetup undo update url user verify vfile wiki wikiformat winhttp xfer zip sqlite3 th th_lang > $@ +echo fossil >> $@ +echo fossil >> $@ +echo $(LIBS) >> $@ +echo. >> $@ +echo fossil >> $@ @@ -64,13 +64,10 @@ $(BCC) -o$@ $** version$E: $B\win\version.c $(BCC) -o$@ $** -$(OBJDIR)\shell$O : $(SRCDIR)\shell.c - $(TCC) -o$@ -c -Dmain=sqlite3_shell -DSQLITE_OMIT_LOAD_EXTENSION=1 $** - $(OBJDIR)\sqlite3$O : $(SRCDIR)\sqlite3.c $(TCC) -o$@ -c -DSQLITE_OMIT_LOAD_EXTENSION=1 -DSQLITE_THREADSAFE=0 -DSQLITE_DEFAULT_FILE_FORMAT=4 -Dlocaltime=fossil_localtime -DSQLITE_ENABLE_LOCKING_STYLE=0 $** $(OBJDIR)\th$O : $(SRCDIR)\th.c $(TCC) -o$@ -c $** @@ -234,21 +231,21 @@ $(TCC) -o$@ -c event_.c event_.c : $(SRCDIR)\event.c +translate$E $** > $@ -$(OBJDIR)\export$O : export_.c export.h - $(TCC) -o$@ -c export_.c - -export_.c : $(SRCDIR)\export.c - +translate$E $** > $@ - $(OBJDIR)\encode$O : encode_.c encode.h $(TCC) -o$@ -c encode_.c encode_.c : $(SRCDIR)\encode.c +translate$E $** > $@ + +$(OBJDIR)\event$O : event_.c event.h + $(TCC) -o$@ -c event_.c + +event_.c : $(SRCDIR)\event.c + +translate$E $** > $@ $(OBJDIR)\file$O : file_.c file.h $(TCC) -o$@ -c file_.c file_.c : $(SRCDIR)\file.c @@ -288,16 +285,10 @@ $(TCC) -o$@ -c http_transport_.c http_transport_.c : $(SRCDIR)\http_transport.c +translate$E $** > $@ -$(OBJDIR)\import$O : import_.c import.h - $(TCC) -o$@ -c import_.c - -import_.c : $(SRCDIR)\import.c - +translate$E $** > $@ - $(OBJDIR)\info$O : info_.c info.h $(TCC) -o$@ -c info_.c info_.c : $(SRCDIR)\info.c +translate$E $** > $@ @@ -535,7 +526,7 @@ zip_.c : $(SRCDIR)\zip.c +translate$E $** > $@ headers: makeheaders$E page_index.h VERSION.h - +makeheaders$E add_.c:add.h allrepo_.c:allrepo.h attach_.c:attach.h bag_.c:bag.h blob_.c:blob.h branch_.c:branch.h browse_.c:browse.h captcha_.c:captcha.h cgi_.c:cgi.h checkin_.c:checkin.h checkout_.c:checkout.h clearsign_.c:clearsign.h clone_.c:clone.h comformat_.c:comformat.h configure_.c:configure.h content_.c:content.h db_.c:db.h delta_.c:delta.h deltacmd_.c:deltacmd.h descendants_.c:descendants.h diff_.c:diff.h diffcmd_.c:diffcmd.h doc_.c:doc.h encode_.c:encode.h event_.c:event.h export_.c:export.h file_.c:file.h 
finfo_.c:finfo.h graph_.c:graph.h http_.c:http.h http_socket_.c:http_socket.h http_ssl_.c:http_ssl.h http_transport_.c:http_transport.h import_.c:import.h info_.c:info.h login_.c:login.h main_.c:main.h manifest_.c:manifest.h md5_.c:md5.h merge_.c:merge.h merge3_.c:merge3.h name_.c:name.h pivot_.c:pivot.h popen_.c:popen.h pqueue_.c:pqueue.h printf_.c:printf.h rebuild_.c:rebuild.h report_.c:report.h rss_.c:rss.h schema_.c:schema.h search_.c:search.h setup_.c:setup.h sha1_.c:sha1.h shun_.c:shun.h skins_.c:skins.h stat_.c:stat.h style_.c:style.h sync_.c:sync.h tag_.c:tag.h th_main_.c:th_main.h timeline_.c:timeline.h tkt_.c:tkt.h tktsetup_.c:tktsetup.h undo_.c:undo.h update_.c:update.h url_.c:url.h user_.c:user.h verify_.c:verify.h vfile_.c:vfile.h wiki_.c:wiki.h wikiformat_.c:wikiformat.h winhttp_.c:winhttp.h xfer_.c:xfer.h zip_.c:zip.h $(SRCDIR)\sqlite3.h $(SRCDIR)\th.h VERSION.h + +makeheaders$E add_.c:add.h allrepo_.c:allrepo.h attach_.c:attach.h bag_.c:bag.h blob_.c:blob.h branch_.c:branch.h browse_.c:browse.h captcha_.c:captcha.h cgi_.c:cgi.h checkin_.c:checkin.h checkout_.c:checkout.h clearsign_.c:clearsign.h clone_.c:clone.h comformat_.c:comformat.h configure_.c:configure.h content_.c:content.h db_.c:db.h delta_.c:delta.h deltacmd_.c:deltacmd.h descendants_.c:descendants.h diff_.c:diff.h diffcmd_.c:diffcmd.h doc_.c:doc.h encode_.c:encode.h event_.c:event.h file_.c:file.h finfo_.c:finfo.h graph_.c:graph.h http_.c:http.h http_socket_.c:http_socket.h http_ssl_.c:http_ssl.h http_transport_.c:http_transport.h info_.c:info.h login_.c:login.h main_.c:main.h manifest_.c:manifest.h md5_.c:md5.h merge_.c:merge.h merge3_.c:merge3.h name_.c:name.h pivot_.c:pivot.h popen_.c:popen.h pqueue_.c:pqueue.h printf_.c:printf.h rebuild_.c:rebuild.h report_.c:report.h rss_.c:rss.h schema_.c:schema.h search_.c:search.h setup_.c:setup.h sha1_.c:sha1.h shun_.c:shun.h skins_.c:skins.h stat_.c:stat.h style_.c:style.h sync_.c:sync.h tag_.c:tag.h th_main_.c:th_main.h timeline_.c:timeline.h tkt_.c:tkt.h tktsetup_.c:tktsetup.h undo_.c:undo.h update_.c:update.h url_.c:url.h user_.c:user.h verify_.c:verify.h vfile_.c:vfile.h wiki_.c:wiki.h wikiformat_.c:wikiformat.h winhttp_.c:winhttp.h xfer_.c:xfer.h zip_.c:zip.h $(SRCDIR)\sqlite3.h $(SRCDIR)\th.h VERSION.h @copy /Y nul: headers DELETED www/checkin_names.wiki Index: www/checkin_names.wiki ================================================================== --- www/checkin_names.wiki +++ /dev/null @@ -1,174 +0,0 @@ -Check-in Names - -Many Fossil commands and [./webui.wiki | web-interface] URLs accept -check-in names as an argument. For example, the "info" command -accepts an optional check-in name to identify the specific checkout -about which information is desired: - -
-fossil info checkin-name -
- -You are perhaps reading this page from the following URL: - -
-http://www.fossil-scm.org/fossil/doc/trunk/www/checkin_names.wiki -
- -The URL above is an example of an [./embeddeddoc.wiki | embedded documentation] -page in Fossil. The "trunk" element of the pathname is a check-in name that -determines which version of the documentation to display. - -Fossil provides a variety of ways to specify a check-in. This -document describes the various methods. - -

Canonical Check-in Name

- -The canonical name of a checkin is the SHA1 hash of its -[./fileformat.wiki#manifest | manifest] expressed as a 40-character -lowercase hexadecimal number. For example: - -
-fossil info e5a734a19a9826973e1d073b49dc2a16aa2308f9
-
- -The full 40-character SHA1 hash is unwieldy to remember and type, though, -so Fossil also accepts a unique prefix of the hash, using any combination -of upper and lower case letters, as long as the prefix is at least 4 -characters long. Hence the following commands all -accomplish the same thing as the above: - -
-fossil info e5a734a19a9
-fossil info E5a734A
-fossil info e5a7
-
- -Many web-interface screens identify check-ins by a 10- or 16-character -prefix of the canonical name. - -
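To make the prefix rule concrete, here is a short, self-contained C sketch. It is illustrative only and is not Fossil's implementation: the resolver function and the in-memory hash list are invented for this example. It resolves a case-insensitive prefix of at least four characters against a set of full 40-character hashes and rejects prefixes that are unknown or ambiguous; a real resolver would of course consult the repository rather than a fixed list.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Return nonzero if zHash begins with the n-character prefix zPrefix,
** ignoring case. */
static int prefix_matches(const char *zPrefix, const char *zHash, size_t n){
  size_t i;
  for(i=0; i<n; i++){
    if( tolower((unsigned char)zPrefix[i]) != tolower((unsigned char)zHash[i]) ){
      return 0;
    }
  }
  return 1;
}

/* Resolve a prefix against a list of full 40-character hashes.  Returns
** the unique match, or NULL if the prefix is shorter than 4 characters,
** unknown, or ambiguous. */
static const char *resolve_prefix(const char *zPrefix,
                                  const char **azHash, size_t nHash){
  size_t i, n = strlen(zPrefix);
  const char *zMatch = 0;
  if( n<4 || n>40 ) return 0;
  for(i=0; i<nHash; i++){
    if( prefix_matches(zPrefix, azHash[i], n) ){
      if( zMatch ) return 0;          /* more than one match: ambiguous */
      zMatch = azHash[i];
    }
  }
  return zMatch;
}

int main(void){
  static const char *azHash[] = {
    "e5a734a19a9826973e1d073b49dc2a16aa2308f9",
    "deed28aa99a835f01fa06d5b4a41ecc2121bf419"
  };
  const char *z = resolve_prefix("E5a734A", azHash, 2);
  printf("%s\n", z ? z : "(no unique match)");
  return 0;
}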

Tags And Branch Names

- -Using a tag or branch name where a check-in name is expected causes -Fossil to choose the most recent check-in with that tag or branch name. -So, for example, as of this writing the most recent check-in that -is tagged with "release" is [d0753799e44]. -So the command: - -
-fossil info release
-
- -Results in the following output: - -
-uuid:         d0753799e447b795933e9f266233767d84aa1d84 2010-11-01 14:23:35 UTC
-parent:       4e1241f3236236187ad2a8f205323c05b98c9895 2010-10-31 21:51:11 UTC
-child:        4a094f46ade70bd9d1e4ffa48cbe94b4d3750aef 2010-11-01 18:52:37 UTC
-child:        f4033ec09ee6bb2a73fa588c217527a1f311bd27 2010-11-01 23:38:34 UTC
-tags:         trunk, release
-comment:      Fix a typo in the file format documentation reported on the
-              Tcl/Tk chatroom. (user: drh)
-
- -There are multiple check-ins that are tagged with "release" but -(as of this writing) the [d0753799e44] -check-in is the most recent so it is the one that is selected. - -Note that unlike other common DVCSes, a "branch" in Fossil -is not anything special; it is simply a sequence of check-ins that -share a common tag. So the same mechanism that resolves tag names -also resolves branch names. - -Note also that there can (in theory) be an ambiguity between tag names -and canonical names. Suppose, for example, you had a check-in with -the canonical name deed28aa99a835f01fa06d5b4a41ecc2121bf419 and you -also happened to have tagged a different check-in with "deed2". If -you use the "deed2" name, does it choose the canonical name or the tag -name? In such cases, you can prefix the tag name with "tag:". Hence - -
-fossil info tag:deed2 -
- -Will refer to the most recent check-in tagged with "deed2" not to the -check-in whose canonical name begins with "deed2". - -

Timestamps

- -A timestamp in one of the formats shown below means the most recent -check-in that occurs no later than the timestamp given: - - * YYYY-MM-DD - * YYYY-MM-DD HH:MM - * YYYY-MM-DD HH:MM:SS - -The space between the date and the time can optionally be -replaced by an uppercase T and the entire timestamp can -optionally be followed by "utc". - -In its default configuration, Fossil interprets and displays all dates -in Coordinated Universal Time (UTC). This tends to work the best for -distributed projects where participants are scattered around the globe. -But there is an option on the Admin/Timeline page of the web-interface to -switch to local time. The "utc" suffix on a timestamp check-in -name is meaningless if Fossil is in the default mode of using UTC for -everything, but if Fossil has been switched to localtime mode, then the -"utc" suffix means to interpret that particular timestamp using -UTC instead of localtime. - -As an example, consider the homepage for the Fossil website itself: - -
-http://www.fossil-scm.org/fossil/doc/trunk/www/index.wiki -
- -The "trunk" component of that URL is a check-in name. To see what the -Fossil website looked like on January 1, 2009, one has merely to change -the URL to the following: - -
-http://www.fossil-scm.org/fossil/doc/2009-01-01/www/index.wiki -
- -

Tag And Timestamp

- -A check-in name can also take the form of a tag or branch name followed by -a colon and then a timestamp. The combination means to take the most -recent check-in with the given tag or branch which is not more recent than -the timestamp. So, for example: - -
-fossil update trunk:2010-07-01T14:30 -
- -Would cause Fossil to update the working check-out to be the most recent -check-in on the trunk that is not more recent than 14:30 (UTC) on -July 1, 2010. - -
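Both the plain timestamp form and the tag-plus-timestamp form reduce to the same selection rule: take the newest matching check-in whose time does not exceed the cutoff. The following self-contained C sketch is illustrative only; it is not Fossil code, the check-ins, tag names, and shortened hashes are invented, and the cutoff is assumed to be normalized to "YYYY-MM-DD HH:MM" UTC form. Plain string comparison suffices because timestamps in this format sort chronologically.

#include <stdio.h>
#include <string.h>

typedef struct CheckIn {
  const char *zTag;    /* branch or tag name, e.g. "trunk" */
  const char *zDate;   /* UTC time, "YYYY-MM-DD HH:MM:SS" */
  const char *zUuid;   /* shortened artifact hash (invented) */
} CheckIn;

/* Return the most recent check-in tagged zTag whose date is not later
** than zCutoff, or NULL if there is none. */
static const CheckIn *pick(const CheckIn *a, int n,
                           const char *zTag, const char *zCutoff){
  const CheckIn *pBest = 0;
  int i;
  for(i=0; i<n; i++){
    if( strcmp(a[i].zTag, zTag)!=0 ) continue;        /* wrong tag */
    if( strcmp(a[i].zDate, zCutoff)>0 ) continue;     /* later than cutoff */
    if( pBest==0 || strcmp(a[i].zDate, pBest->zDate)>0 ) pBest = &a[i];
  }
  return pBest;
}

int main(void){
  static const CheckIn aCk[] = {
    { "trunk",   "2010-06-28 09:14:00", "a1b2c3d4" },
    { "trunk",   "2010-07-01 12:00:00", "e5f6a7b8" },
    { "trunk",   "2010-07-02 08:30:00", "c9d0e1f2" },
    { "release", "2010-06-30 18:00:00", "01234abc" },
  };
  const CheckIn *p = pick(aCk, 4, "trunk", "2010-07-01 14:30");
  printf("%s\n", p ? p->zUuid : "(none)");   /* prints e5f6a7b8 */
  return 0;
}

Using a tag that matches every check-in turns the same loop into the plain timestamp rule described earlier.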

Special Tags

- -The tag "tip" means the most recent check-in. The "tip" tag is roughly -equivalent to the timestamp tag "5000-01-01". - -If the command is being run from a working check-out (not against a bare -repository) then a few extra tags apply. The "current" tag means the -current check-out. The "next" tag means the youngest child of the -current check-out. And the "previous" tag means the primary (non-merge) -parent of the current check-out. - -

Additional Examples

- -To view the changes in the most recent check-in prior to the version currently -checked out: - -
-fossil diff --from previous --to current
-
- -Suppose you are in the habit of tagging each release with a "release" tag. -Then to see everything that has changed on the trunk since the last release: - -
-fossil diff --from release --to trunk
-
DELETED www/fossil-v-git.wiki Index: www/fossil-v-git.wiki ================================================================== --- www/fossil-v-git.wiki +++ /dev/null @@ -1,203 +0,0 @@ -Fossil Versus Git - -

1.0 Don't Stress!

- -If you start out using one DVCS and later decide you like the other better, -it is [./inout.wiki | easy to change]. - -But it also helps to be informed about the differences between -[http://git-scm.com | Git] and Fossil. See the table below for -a high-level summary and the text that follows for more details. - -Keep in mind that you are reading this on a Fossil website, -so the information here -might be biased in favor of Fossil. Ask around with people who have -used both Fossil and Git for other opinions. - -

2.0 Executive Summary:

- -
GIT                          | FOSSIL
File versioning only         | Versioning, Tickets, Wiki, and Blog/News
Sharding                     | Replicating
Huge community               | Road less traveled
Complex                      | Intuitive
Separate web tools           | Integrated Web interface
Lots of little tools         | Single executable
Pile-of-files repository     | Single file repository
Uses "rebase"                | Immutable
GPL                          | BSD

3.0 Discussion

- -

3.1 Feature Set

- -Git provides file versioning services only, whereas Fossil adds an -integrated [./wikitheory.wiki | wiki], -[./bugtheory.wiki | ticketing & bug tracking], -[./embeddeddoc.wiki | embedded documentation], and -[./event.wiki | News/Blog features]. -These additional capabilities are available for Git as 3rd-party -user-installed add-ons, but with Fossil they are integrated into -the design. One way to describe Fossil is that it is -"[https://github.com/ | github]-in-a-box". - -

3.2 Sharding versus Replicating

- -Git makes it easy for each repository in a project to hold a subset of -the branches for that project. In fact, it is entirely possible and not -uncommon for no repository in the project to hold all the different code -versions for a project. Instead the information is distributed. -Individual developers have one or more private branches. A hierarchy -of integrators merge changes from individual developers into collaborative -branches, until all the changes are merged together at the top-level master -branch. And all of this can be accomplished without having to have all the -code in any one repository. Developers or groups of developers can share -only those branches that they want to share and keep other branches of the -project private. This is analogous to sharding in a distributed database. - -Fossil allows private branches, but its default mode is to share everything. -And so in a Fossil project, all repositories tend to contain all of the -content at all times. This is analogous to replication in a -distributed database. - -The Git model works best for large projects, like the -Linux kernel for which Git was designed. -Linus Torvalds does not need or want to see a thousand -different branches, one for each contributor. Git allows intermediary -"gate-keepers" to merge changes from multiple lower-level developers -into a single branch and only present Linus with a handful of branches -at a time. Git encourages a programming model where each developer -works in his or her own branch and then merges changes up the hierarchy -until they reach the master branch. - -Fossil is designed for smaller and non-hierarchical teams where all -developers are operating directly on the master branch, or at most -a small number of well defined branches. -The [concepts.wiki#workflow | autosync] mode of Fossil makes it easy -for multiple developers to work on a single branch and maintain -linear development on that branch and avoid needless forking -and merging. - -

3.3 Community

- -Git has a huge user community. If following the herd and being -like everybody else is important to you, then you should choose Git. - -Fossil is clearly the "road less traveled": - -
-Two roads diverged in a wood, and I —
-I took the one less traveled by,
-And that has made all the difference.
-        -- Robert Frost, The Road Not Taken, 1916 -
- - -Among the advantages of Git's huge user community is that new team -members may already be familiar with Git's operation and hence can -bypass the VCS learning curve. Also, if you need an add-on tool or -script of some kind, a Google search will likely turn up a suitable -tool that you can just download and use. A huge community also means -that somebody else has likely already encountered and fixed the bugs -so that Git will work for you and your project as advertised. - -Among the advantages of the "road less traveled" is that your particular -project will be a bigger percentage of the total user base, and is thus -more likely to receive personal attention from the Fossil maintainers -if you do encounter problems. - -

3.4 Complexity

- -Git is a complex system. It can be tricky to use and requires a fair -amount of knowledge and experience to master. Fossil strives to be -a much simpler system that can be learned and mastered much more quickly. -Fossil strives to have fewer "gotchas" and quirks that can trip up a -developer. - -The ideal VCS should just get out of the way of the developer and allow -the developer to focus 100% of their thinking on the project under -development. One should not have to stop and think about how to operate -the VCS. Of course, no VCS is ideal. Every VCS requires the developer -to think about version control to some extent. But one wants to minimize -the thinking about version control. - -Git requires the developer to maintain a more complex mental model than -most other DVCSes. Git takes longer to learn. And you have to spend -more time thinking about what you are doing with Git. - -Fossil strives for simplicity. Fossil wants to be easy to learn and to -require little thinking about how to operate it. Reports from the -field indicate that Fossil is mostly successful at this effort. - -

3.5 Web Interface

- -Git has a web interface, but it requires a fair amount of setup and an -external web server. Fossil comes with a fully functional -[./webui.wiki | built-in web-server] -and a really simple mechanism (the "fossil ui" command) to -automatically start the web server and bring up a web browser to navigate -it. The web interface for Fossil is not only easier to set up, it is also -more powerful and easier to use. The web interface to Fossil is a practical -replacement to the 3rd-party "GUI Tools" that users often employ to operate -Git. - -

3.6 Implementation Strategy

- -Git consists of a collection of many little programs. Git needs to be -"installed" using some kind of installer or package tool. Git can be -tricky to install and get working, especially for users without -administrative privileges. - -Fossil is a single self-contained executable. To "install" Fossil one -has merely to download a precompiled binary and place that binary -somewhere on $PATH. To uninstall Fossil, simply delete the binary. -To upgrade Fossil, replace the old binary with a new one. - -Fossil is designed to be trivial to install, uninstall, and upgrade so -that developers can spend more time working on their own projects and -much less time configuring their version control system. - -

3.7 Repository Storage

- -A Git repository is a "pile-of-files" in the ".git" directory at the -root of the working checkout. There is a one-to-one correspondence -between repositories and working checkouts. A power-loss or system crash -in the middle of Git operation can damage or corrupt the Git repository. - -A Fossil repository consists of a single disk file. A single Fossil -repository can serve multiple simultaneous working checkouts. -A Fossil repository is an SQLite database, so it is highly resistant -to damage from a power-loss or system crash - incomplete transactions -are simply rolled back after the system reboots. - -

3.8 Audit Trail

- -Git features the "rebase" command which can be used to change the -sequence of check-ins in the repository. Rebase can be used to "clean up" -a complex sequence of check-ins to make their intent easier for others -to understand. From another point of view, rebase can be used to -"rewrite history" - to do what -[http://en.wikipedia.org/wiki/Winston_Smith | Winston Smith] did for -a living in Orwell's novel -[http://en.wikipedia.org/wiki/Nineteen_Eighty-Four | 1984]. - -Fossil deliberately avoids rewriting history. Fossil strives to follow -the accounting philosophy of never erasing anything. Mistakes are fixed -by entering a correction, with an explanation of why the correction is -needed. This can make the history of a project messy, but it also -makes it more honest. The lack of a "rebase" function is considered -a feature of Fossil, not a bug. - -

3.9 License

- -Both Git and Fossil are open-source. Git is under -[http://www.gnu.org/licenses/gpl.html | GPL] whereas Fossil is -under the -[http://en.wikipedia.org/wiki/BSD_licenses | two-clause BSD license]. -The difference should not be a concern to most users. However, -some corporate lawyers have objections to using GPL products and -are more comfortable with a BSD-style license. Index: www/index.wiki ================================================================== --- www/index.wiki +++ www/index.wiki @@ -131,13 +131,10 @@ to configure the ticketing subsystem. * A free hosting server for Fossil repositories is available at [http://chiselapp.com/]. * How to [./server.wiki | set up a server] for your repository. * Customizing the [./custom_ticket.wiki | ticket system]. - * Methods to [./checkin_names.wiki | identify a specific check-in]. - * [./inout.wiki | Import and export] from and to Git. - * [./fossil-v-git.wiki | Fossil versus Git].

Links For Fossil Developer:

* [./theory1.wiki | Thoughts On The Design Of Fossil]. * [./pop.wiki | Principles Of Operation] DELETED www/inout.wiki Index: www/inout.wiki ================================================================== --- www/inout.wiki +++ /dev/null @@ -1,39 +0,0 @@ -Import And Export - -Fossil has the ability to import and export repositories from and to -[http://git-scm.com/ | Git]. And since most other version control -systems will also import/export from Git, that means that you can -import/export a Fossil repository to most version control systems using -Git as an intermediary. - -

Git → Fossil

- -To import a Git repository into Fossil, run commands like this: - -
-cd git-repo
-git fast-export --all | fossil import new-repo.fossil
-
- -In other words, simply pipe the output of the "git fast-export" command -into the "fossil import" command. The 2nd argument to the "fossil import" -command is the name of a new Fossil repository that is created to hold the Git -content. - -

Fossil → Git

- -To convert a Fossil repository into a Git repository, run commands like -this: - -
-git init new-repo
-cd new-repo
-fossil export -R ../repo.fossil | git fast-import
-
- -In other words, create a new Git repository, then pipe the output from the -"fossil export" command into the "git fast-import" command. - -Note that the "fossil export" command only exports the versioned files. -Tickets, wiki, and events are not exported, since Git does not understand -those concepts. Index: www/quickstart.wiki ================================================================== --- www/quickstart.wiki +++ www/quickstart.wiki @@ -82,17 +82,10 @@ is not required.

Note: If you are behind a restrictive firewall, you might need to specify an HTTP proxy to use.

-

Importing From Another Version Control System

- -

Rather than start a new project, or clone an existing Fossil project, - you might prefer to - import an existing Git project - into Fossil. -

Checking Out A Local Tree

To work on a project in fossil, you need to check out a local copy of the source tree. Create the directory you want to be the root of your tree and cd into that directory. Then Index: www/sync.wiki ================================================================== --- www/sync.wiki +++ www/sync.wiki @@ -223,55 +223,20 @@

3.5 Clone Cards

A clone card works like a pull card in that it is sent from client to server in order to tell the server that the client -wants to pull content. The clone card comes in two formats. Older -clients use the no-argument format and newer clients use the -two-argument format.
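In either format, a card is a single line of text: a card name followed by zero or more space-separated arguments, as shown below. The following self-contained C sketch is illustrative only, not Fossil's actual parser; it splits one such line into tokens so a receiver could dispatch on the card name.

#include <stdio.h>
#include <string.h>

#define MX_TOKEN 8

/* Split one card line, in place, into whitespace-separated tokens.
** Returns the token count; azToken[0] is the card name. */
static int split_card(char *zLine, char *azToken[], int mxToken){
  int n = 0;
  char *z = strtok(zLine, " \t\r\n");
  while( z && n<mxToken ){
    azToken[n++] = z;
    z = strtok(0, " \t\r\n");
  }
  return n;
}

int main(void){
  /* A two-argument clone card: protocol-version 2, sequence-number 0,
  ** matching the format described below. */
  char zCard[] = "clone 2 0";
  char *azToken[MX_TOKEN];
  int nToken = split_card(zCard, azToken, MX_TOKEN);
  if( nToken>=1 && strcmp(azToken[0], "clone")==0 ){
    printf("clone card with %d argument(s)\n", nToken-1);
  }
  return 0;
}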

- -
-clone
-clone protocol-version sequence-number -
- -

3.5.1 Protocol 2

- -

The latest clients send a two-argument clone message with a -protocol version of "2". (Future versions of Fossil might use larger -protocol version numbers.) The sequence-number sent is the number -of artifacts received so far. For the first clone message, the -sequence number is 0. The server will respond by sending file -cards for some number of artifacts up to the maximum message size. - -

The server will also send a single "clone_seqno" card to the client -so that the client can know where the server left off. - -

-clone_seqno sequence-number -
- -

The clone message in subsequent HTTP requests for the same clone -operation will use the sequence-number from the -clone_seqno of the previous reply.

- -

In response to an initial clone message, the server also sends the client -a push message so that the client can discover the projectcode for -this project.

- -

3.5.2 Legacy Protocol

- -

Older clients send a clone card with no argument. The server responds -to a blank clone card by sending an "igot" card for every artifact in the -repository. The client will then issue "gimme" cards to pull down all the -content it needs. - -

The legacy protocol works well for smaller repositories (50MB with 50,000 -artifacts) but is too slow and unwieldy for larger repositories. -The version 2 protocol is an effort to improve performance. Further -performance improvements with higher-numbered clone protocols are -possible in future versions of Fossil. +wants to pull content. But unlike the pull card, the clone +card has no arguments.

+ +
+clone +
+ +

In response to a clone message, the server also sends the client +a push message so that the client can discover the projectcode for +this project.

3.6 Igot Cards

An igot card can be sent from either client to server or from server to client in order to indicate that the sender holds a copy @@ -322,100 +287,15 @@ a cookie from another server. (Typically the server will embed its servercode as part of the cookie.)

3.9 Request-Configuration Cards

-

A request-configuration or "reqconfig" card is sent from client to -server in order to request that the server send back "configuration" -data. "Configuration" data is information about users or website -appearance or other administrative details which are not part of the -persistent and versioned state of the project. For example, the "name" -of the project, the default Cascading Style Sheet (CSS) for the web-interface, -and the project logo displayed on the web-interface are all configuration -data elements. - -

The reqconfig card is normally sent in response to the -"fossil configuration pull" command. The format is as follows: - -

-reqconfig configuration-name -
- -

As of this writing ([2010-11-12]), the configuration-name must be one of the -following values: - -

- -
-
    -
  • css -
  • header -
  • footer -
  • logo-mimetype -
  • logo-image -
  • project-name -
  • project-description -
  • manifest -
  • index-page -
      -
    • timeline-block-markup -
    • timeline-max-comment -
    • ticket-table -
    • ticket-common -
    • ticket-newpage -
    • ticket-viewpage -
    • ticket-editpage -
    • ticket-reportlist -
    • ticket-report-template -
        -
      • ticket-key-template -
      • ticket-title-expr -
      • ticket-closed-expr -
      • @reportfmt -
      • @user -
      • @concealed -
      • @shun -
      - -

      New configuration-names are likely to be added in future releases of -Fossil. If the server receives a configuration-name that it does not -understand, the entire reqconfig card is silently ignored. The reqconfig -card might also be ignored if the user lacks sufficient privilege to -access the requested information. - -

      The configuration-names that begin with an alphabetic character refer -to values in the "config" table of the server database. For example, -the "logo-image" configuration item refers to the project logo image -that is configured on the Admin page of the [./webui.wiki | web-interface]. -The value of the configuration item is returned to the client using a -"config" card. - -

      If the configuration-name begins with "@", that refers to a class of -values instead of a single value. The content of these configuration items -is returned in a "config" card that contains pure SQL text that is -intended to be evaluated by the client. - -

      The @user and @concealed configuration items contain sensitive information -and are ignored for clients without sufficient privilege. +TBD...

      3.10 Configuration Cards

      -

      A "config" card is used to send configuration information from client -to server (in response to a "fossil configuration push" command) or -from server to client (in response to a "fossil configuration pull" or -"fossil clone" command). The format is as follows: - -

      -config configuration-name size \n content -
      - -

      The server will only accept a config card if the user has -"Admin" privilege. A client will only accept a config card if -it had sent a corresponding reqconfig card in its request. - -

      The content of the configuration item is used to overwrite the -corresponding configuration data in the receiver. +TBD...

      3.11 Error Cards

      If the server discovers anything wrong with a request, it generates an error card in its reply. When the client sees the error card,