[open-ils-commits] [GIT] Evergreen ILS branch master updated. 09873f3feaa4a861716bf0470172c29d3783da8d

Evergreen Git git at git.evergreen-ils.org
Mon Apr 1 14:27:21 EDT 2013


This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "Evergreen ILS".

The branch, master has been updated
       via  09873f3feaa4a861716bf0470172c29d3783da8d (commit)
       via  d17ff9c5b24208da34e374cb1c2e6110003a3470 (commit)
       via  d589e0c1af415ce3d107d40b47b1b9ba4491ef6b (commit)
       via  8b0139a80a70b436fbe8a1b2d5f445431d36d6aa (commit)
      from  e0dba3994a3808ec8b4e2f54b23b162bc6c3e523 (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.

- Log -----------------------------------------------------------------
commit 09873f3feaa4a861716bf0470172c29d3783da8d
Author: Jason Stephenson <jstephenson at mvlc.org>
Date:   Mon Apr 1 14:25:02 2013 -0400

    Stamping upgrade script for revive-qp-fts (LP 1161601).
    
    Signed-off-by: Jason Stephenson <jstephenson at mvlc.org>

diff --git a/Open-ILS/src/sql/Pg/002.schema.config.sql b/Open-ILS/src/sql/Pg/002.schema.config.sql
index 07bb235..2637378 100644
--- a/Open-ILS/src/sql/Pg/002.schema.config.sql
+++ b/Open-ILS/src/sql/Pg/002.schema.config.sql
@@ -91,7 +91,7 @@ CREATE TRIGGER no_overlapping_deps
     BEFORE INSERT OR UPDATE ON config.db_patch_dependencies
     FOR EACH ROW EXECUTE PROCEDURE evergreen.array_overlap_check ('deprecates');
 
-INSERT INTO config.upgrade_log (version, applied_to) VALUES ('0785', :eg_version); -- senator/miker
+INSERT INTO config.upgrade_log (version, applied_to) VALUES ('0786', :eg_version); -- miker/dyrcona
 
 CREATE TABLE config.bib_source (
 	id		SERIAL	PRIMARY KEY,
diff --git a/Open-ILS/src/sql/Pg/upgrade/XXXX.schema.qp_fts_stored_proc.sql b/Open-ILS/src/sql/Pg/upgrade/0786.schema.qp_fts_stored_proc.sql
similarity index 99%
rename from Open-ILS/src/sql/Pg/upgrade/XXXX.schema.qp_fts_stored_proc.sql
rename to Open-ILS/src/sql/Pg/upgrade/0786.schema.qp_fts_stored_proc.sql
index 60d8c2a..f113f05 100644
--- a/Open-ILS/src/sql/Pg/upgrade/XXXX.schema.qp_fts_stored_proc.sql
+++ b/Open-ILS/src/sql/Pg/upgrade/0786.schema.qp_fts_stored_proc.sql
@@ -1,3 +1,8 @@
+BEGIN;
+
+--Check if we can apply the upgrade.
+SELECT evergreen.upgrade_deps_block_check('0786', :eg_version);
+
 
 CREATE TYPE search.search_result AS ( id BIGINT, rel NUMERIC, record INT, total INT, checked INT, visible INT, deleted INT, excluded INT );
 CREATE TYPE search.search_args AS ( id INT, field_class TEXT, field_name TEXT, table_alias TEXT, term TEXT, term_type TEXT );
@@ -327,3 +332,4 @@ END;
 $func$ LANGUAGE PLPGSQL;
 
  
+COMMIT;

commit d17ff9c5b24208da34e374cb1c2e6110003a3470
Author: Mike Rylander <mrylander at gmail.com>
Date:   Thu Mar 28 17:12:16 2013 -0400

    Upgrade script to bring back the stored proc
    
    Signed-off-by: Mike Rylander <mrylander at gmail.com>
    Signed-off-by: Jason Stephenson <jstephenson at mvlc.org>

diff --git a/Open-ILS/src/sql/Pg/upgrade/XXXX.schema.qp_fts_stored_proc.sql b/Open-ILS/src/sql/Pg/upgrade/XXXX.schema.qp_fts_stored_proc.sql
new file mode 100644
index 0000000..60d8c2a
--- /dev/null
+++ b/Open-ILS/src/sql/Pg/upgrade/XXXX.schema.qp_fts_stored_proc.sql
@@ -0,0 +1,329 @@
+
+CREATE TYPE search.search_result AS ( id BIGINT, rel NUMERIC, record INT, total INT, checked INT, visible INT, deleted INT, excluded INT );
+CREATE TYPE search.search_args AS ( id INT, field_class TEXT, field_name TEXT, table_alias TEXT, term TEXT, term_type TEXT );
+
+CREATE OR REPLACE FUNCTION search.query_parser_fts (
+
+    param_search_ou INT,
+    param_depth     INT,
+    param_query     TEXT,
+    param_statuses  INT[],
+    param_locations INT[],
+    param_offset    INT,
+    param_check     INT,
+    param_limit     INT,
+    metarecord      BOOL,
+    staff           BOOL,
+    param_pref_ou   INT DEFAULT NULL
+) RETURNS SETOF search.search_result AS $func$
+DECLARE
+
+    current_res         search.search_result%ROWTYPE;
+    search_org_list     INT[];
+    luri_org_list       INT[];
+    tmp_int_list        INT[];
+
+    check_limit         INT;
+    core_limit          INT;
+    core_offset         INT;
+    tmp_int             INT;
+
+    core_result         RECORD;
+    core_cursor         REFCURSOR;
+    core_rel_query      TEXT;
+
+    total_count         INT := 0;
+    check_count         INT := 0;
+    deleted_count       INT := 0;
+    visible_count       INT := 0;
+    excluded_count      INT := 0;
+
+BEGIN
+
+    check_limit := COALESCE( param_check, 1000 );
+    core_limit  := COALESCE( param_limit, 25000 );
+    core_offset := COALESCE( param_offset, 0 );
+
+    -- core_skip_chk := COALESCE( param_skip_chk, 1 );
+
+    IF param_search_ou > 0 THEN
+        IF param_depth IS NOT NULL THEN
+            SELECT array_accum(distinct id) INTO search_org_list FROM actor.org_unit_descendants( param_search_ou, param_depth );
+        ELSE
+            SELECT array_accum(distinct id) INTO search_org_list FROM actor.org_unit_descendants( param_search_ou );
+        END IF;
+
+        SELECT array_accum(distinct id) INTO luri_org_list FROM actor.org_unit_ancestors( param_search_ou );
+
+    ELSIF param_search_ou < 0 THEN
+        SELECT array_accum(distinct org_unit) INTO search_org_list FROM actor.org_lasso_map WHERE lasso = -param_search_ou;
+
+        FOR tmp_int IN SELECT * FROM UNNEST(search_org_list) LOOP
+            SELECT array_accum(distinct id) INTO tmp_int_list FROM actor.org_unit_ancestors( tmp_int );
+            luri_org_list := luri_org_list || tmp_int_list;
+        END LOOP;
+
+        SELECT array_accum(DISTINCT x.id) INTO luri_org_list FROM UNNEST(luri_org_list) x(id);
+
+    ELSIF param_search_ou = 0 THEN
+        -- reserved for user lassos (ou_buckets/type='lasso') with ID passed in depth ... hack? sure.
+    END IF;
+
+    IF param_pref_ou IS NOT NULL THEN
+        SELECT array_accum(distinct id) INTO tmp_int_list FROM actor.org_unit_ancestors(param_pref_ou);
+        luri_org_list := luri_org_list || tmp_int_list;
+    END IF;
+
+    OPEN core_cursor FOR EXECUTE param_query;
+
+    LOOP
+
+        FETCH core_cursor INTO core_result;
+        EXIT WHEN NOT FOUND;
+        EXIT WHEN total_count >= core_limit;
+
+        total_count := total_count + 1;
+
+        CONTINUE WHEN total_count NOT BETWEEN  core_offset + 1 AND check_limit + core_offset;
+
+        check_count := check_count + 1;
+
+        PERFORM 1 FROM biblio.record_entry b WHERE NOT b.deleted AND b.id IN ( SELECT * FROM unnest( core_result.records ) );
+        IF NOT FOUND THEN
+            -- RAISE NOTICE ' % were all deleted ... ', core_result.records;
+            deleted_count := deleted_count + 1;
+            CONTINUE;
+        END IF;
+
+        PERFORM 1
+          FROM  biblio.record_entry b
+                JOIN config.bib_source s ON (b.source = s.id)
+          WHERE s.transcendant
+                AND b.id IN ( SELECT * FROM unnest( core_result.records ) );
+
+        IF FOUND THEN
+            -- RAISE NOTICE ' % were all transcendant ... ', core_result.records;
+            visible_count := visible_count + 1;
+
+            current_res.id = core_result.id;
+            current_res.rel = core_result.rel;
+
+            tmp_int := 1;
+            IF metarecord THEN
+                SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
+            END IF;
+
+            IF tmp_int = 1 THEN
+                current_res.record = core_result.records[1];
+            ELSE
+                current_res.record = NULL;
+            END IF;
+
+            RETURN NEXT current_res;
+
+            CONTINUE;
+        END IF;
+
+        PERFORM 1
+          FROM  asset.call_number cn
+                JOIN asset.uri_call_number_map map ON (map.call_number = cn.id)
+                JOIN asset.uri uri ON (map.uri = uri.id)
+          WHERE NOT cn.deleted
+                AND cn.label = '##URI##'
+                AND uri.active
+                AND ( param_locations IS NULL OR array_upper(param_locations, 1) IS NULL )
+                AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
+                AND cn.owning_lib IN ( SELECT * FROM unnest( luri_org_list ) )
+          LIMIT 1;
+
+        IF FOUND THEN
+            -- RAISE NOTICE ' % have at least one URI ... ', core_result.records;
+            visible_count := visible_count + 1;
+
+            current_res.id = core_result.id;
+            current_res.rel = core_result.rel;
+
+            tmp_int := 1;
+            IF metarecord THEN
+                SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
+            END IF;
+
+            IF tmp_int = 1 THEN
+                current_res.record = core_result.records[1];
+            ELSE
+                current_res.record = NULL;
+            END IF;
+
+            RETURN NEXT current_res;
+
+            CONTINUE;
+        END IF;
+
+        IF param_statuses IS NOT NULL AND array_upper(param_statuses, 1) > 0 THEN
+
+            PERFORM 1
+              FROM  asset.call_number cn
+                    JOIN asset.copy cp ON (cp.call_number = cn.id)
+              WHERE NOT cn.deleted
+                    AND NOT cp.deleted
+                    AND cp.status IN ( SELECT * FROM unnest( param_statuses ) )
+                    AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
+                    AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
+              LIMIT 1;
+
+            IF NOT FOUND THEN
+                PERFORM 1
+                  FROM  biblio.peer_bib_copy_map pr
+                        JOIN asset.copy cp ON (cp.id = pr.target_copy)
+                  WHERE NOT cp.deleted
+                        AND cp.status IN ( SELECT * FROM unnest( param_statuses ) )
+                        AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
+                        AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
+                  LIMIT 1;
+
+                IF NOT FOUND THEN
+                -- RAISE NOTICE ' % and multi-home linked records were all status-excluded ... ', core_result.records;
+                    excluded_count := excluded_count + 1;
+                    CONTINUE;
+                END IF;
+            END IF;
+
+        END IF;
+
+        IF param_locations IS NOT NULL AND array_upper(param_locations, 1) > 0 THEN
+
+            PERFORM 1
+              FROM  asset.call_number cn
+                    JOIN asset.copy cp ON (cp.call_number = cn.id)
+              WHERE NOT cn.deleted
+                    AND NOT cp.deleted
+                    AND cp.location IN ( SELECT * FROM unnest( param_locations ) )
+                    AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
+                    AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
+              LIMIT 1;
+
+            IF NOT FOUND THEN
+                PERFORM 1
+                  FROM  biblio.peer_bib_copy_map pr
+                        JOIN asset.copy cp ON (cp.id = pr.target_copy)
+                  WHERE NOT cp.deleted
+                        AND cp.location IN ( SELECT * FROM unnest( param_locations ) )
+                        AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
+                        AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
+                  LIMIT 1;
+
+                IF NOT FOUND THEN
+                    -- RAISE NOTICE ' % and multi-home linked records were all copy_location-excluded ... ', core_result.records;
+                    excluded_count := excluded_count + 1;
+                    CONTINUE;
+                END IF;
+            END IF;
+
+        END IF;
+
+        IF staff IS NULL OR NOT staff THEN
+
+            PERFORM 1
+              FROM  asset.opac_visible_copies
+              WHERE circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
+                    AND record IN ( SELECT * FROM unnest( core_result.records ) )
+              LIMIT 1;
+
+            IF NOT FOUND THEN
+                PERFORM 1
+                  FROM  biblio.peer_bib_copy_map pr
+                        JOIN asset.opac_visible_copies cp ON (cp.copy_id = pr.target_copy)
+                  WHERE cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
+                        AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
+                  LIMIT 1;
+
+                IF NOT FOUND THEN
+
+                    -- RAISE NOTICE ' % and multi-home linked records were all visibility-excluded ... ', core_result.records;
+                    excluded_count := excluded_count + 1;
+                    CONTINUE;
+                END IF;
+            END IF;
+
+        ELSE
+
+            PERFORM 1
+              FROM  asset.call_number cn
+                    JOIN asset.copy cp ON (cp.call_number = cn.id)
+              WHERE NOT cn.deleted
+                    AND NOT cp.deleted
+                    AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
+                    AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
+              LIMIT 1;
+
+            IF NOT FOUND THEN
+
+                PERFORM 1
+                  FROM  biblio.peer_bib_copy_map pr
+                        JOIN asset.copy cp ON (cp.id = pr.target_copy)
+                  WHERE NOT cp.deleted
+                        AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
+                        AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
+                  LIMIT 1;
+
+                IF NOT FOUND THEN
+
+                    PERFORM 1
+                      FROM  asset.call_number cn
+                            JOIN asset.copy cp ON (cp.call_number = cn.id)
+                      WHERE cn.record IN ( SELECT * FROM unnest( core_result.records ) )
+                            AND NOT cp.deleted
+                      LIMIT 1;
+
+                    IF FOUND THEN
+                        -- RAISE NOTICE ' % and multi-home linked records were all visibility-excluded ... ', core_result.records;
+                        excluded_count := excluded_count + 1;
+                        CONTINUE;
+                    END IF;
+                END IF;
+
+            END IF;
+
+        END IF;
+
+        visible_count := visible_count + 1;
+
+        current_res.id = core_result.id;
+        current_res.rel = core_result.rel;
+
+        tmp_int := 1;
+        IF metarecord THEN
+            SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
+        END IF;
+
+        IF tmp_int = 1 THEN
+            current_res.record = core_result.records[1];
+        ELSE
+            current_res.record = NULL;
+        END IF;
+
+        RETURN NEXT current_res;
+
+        IF visible_count % 1000 = 0 THEN
+            -- RAISE NOTICE ' % visible so far ... ', visible_count;
+        END IF;
+
+    END LOOP;
+
+    current_res.id = NULL;
+    current_res.rel = NULL;
+    current_res.record = NULL;
+    current_res.total = total_count;
+    current_res.checked = check_count;
+    current_res.deleted = deleted_count;
+    current_res.visible = visible_count;
+    current_res.excluded = excluded_count;
+
+    CLOSE core_cursor;
+
+    RETURN NEXT current_res;
+
+END;
+$func$ LANGUAGE PLPGSQL;
+
+ 

commit d589e0c1af415ce3d107d40b47b1b9ba4491ef6b
Author: Mike Rylander <mrylander at gmail.com>
Date:   Mon Mar 25 14:51:21 2013 -0400

    Revert "Remove search.query_parser_fts from schema"
    
    This reverts commit ab9fb958e387a20cfe9fafb6035fb72bc5f1fb3f.
    
    Signed-off-by: Mike Rylander <mrylander at gmail.com>
    Signed-off-by: Jason Stephenson <jstephenson at mvlc.org>

diff --git a/Open-ILS/src/sql/Pg/040.schema.asset.sql b/Open-ILS/src/sql/Pg/040.schema.asset.sql
index af44935..ea10156 100644
--- a/Open-ILS/src/sql/Pg/040.schema.asset.sql
+++ b/Open-ILS/src/sql/Pg/040.schema.asset.sql
@@ -127,7 +127,7 @@ CREATE TABLE asset.opac_visible_copies (
 );
 COMMENT ON TABLE asset.opac_visible_copies IS $$
 Materialized view of copies that are visible in the OPAC, used by
-staged search to speed up OPAC visibility checks on large
+search.query_parser_fts() to speed up OPAC visibility checks on large
 databases.  Contents are maintained by a set of triggers.
 $$;
 CREATE INDEX opac_visible_copies_idx1 on asset.opac_visible_copies (record, circ_lib);
diff --git a/Open-ILS/src/sql/Pg/300.schema.staged_search.sql b/Open-ILS/src/sql/Pg/300.schema.staged_search.sql
index c0f7f55..26d9b53 100644
--- a/Open-ILS/src/sql/Pg/300.schema.staged_search.sql
+++ b/Open-ILS/src/sql/Pg/300.schema.staged_search.sql
@@ -30,5 +30,332 @@ CREATE TABLE search.relevance_adjustment (
 );
 CREATE UNIQUE INDEX bump_once_per_field_idx ON search.relevance_adjustment ( field, bump_type );
 
+CREATE TYPE search.search_result AS ( id BIGINT, rel NUMERIC, record INT, total INT, checked INT, visible INT, deleted INT, excluded INT );
+CREATE TYPE search.search_args AS ( id INT, field_class TEXT, field_name TEXT, table_alias TEXT, term TEXT, term_type TEXT );
+
+CREATE OR REPLACE FUNCTION search.query_parser_fts (
+
+    param_search_ou INT,
+    param_depth     INT,
+    param_query     TEXT,
+    param_statuses  INT[],
+    param_locations INT[],
+    param_offset    INT,
+    param_check     INT,
+    param_limit     INT,
+    metarecord      BOOL,
+    staff           BOOL,
+    param_pref_ou   INT DEFAULT NULL
+) RETURNS SETOF search.search_result AS $func$
+DECLARE
+
+    current_res         search.search_result%ROWTYPE;
+    search_org_list     INT[];
+    luri_org_list       INT[];
+    tmp_int_list        INT[];
+
+    check_limit         INT;
+    core_limit          INT;
+    core_offset         INT;
+    tmp_int             INT;
+
+    core_result         RECORD;
+    core_cursor         REFCURSOR;
+    core_rel_query      TEXT;
+
+    total_count         INT := 0;
+    check_count         INT := 0;
+    deleted_count       INT := 0;
+    visible_count       INT := 0;
+    excluded_count      INT := 0;
+
+BEGIN
+
+    check_limit := COALESCE( param_check, 1000 );
+    core_limit  := COALESCE( param_limit, 25000 );
+    core_offset := COALESCE( param_offset, 0 );
+
+    -- core_skip_chk := COALESCE( param_skip_chk, 1 );
+
+    IF param_search_ou > 0 THEN
+        IF param_depth IS NOT NULL THEN
+            SELECT array_accum(distinct id) INTO search_org_list FROM actor.org_unit_descendants( param_search_ou, param_depth );
+        ELSE
+            SELECT array_accum(distinct id) INTO search_org_list FROM actor.org_unit_descendants( param_search_ou );
+        END IF;
+
+        SELECT array_accum(distinct id) INTO luri_org_list FROM actor.org_unit_ancestors( param_search_ou );
+
+    ELSIF param_search_ou < 0 THEN
+        SELECT array_accum(distinct org_unit) INTO search_org_list FROM actor.org_lasso_map WHERE lasso = -param_search_ou;
+
+        FOR tmp_int IN SELECT * FROM UNNEST(search_org_list) LOOP
+            SELECT array_accum(distinct id) INTO tmp_int_list FROM actor.org_unit_ancestors( tmp_int );
+            luri_org_list := luri_org_list || tmp_int_list;
+        END LOOP;
+
+        SELECT array_accum(DISTINCT x.id) INTO luri_org_list FROM UNNEST(luri_org_list) x(id);
+
+    ELSIF param_search_ou = 0 THEN
+        -- reserved for user lassos (ou_buckets/type='lasso') with ID passed in depth ... hack? sure.
+    END IF;
+
+    IF param_pref_ou IS NOT NULL THEN
+        SELECT array_accum(distinct id) INTO tmp_int_list FROM actor.org_unit_ancestors(param_pref_ou);
+        luri_org_list := luri_org_list || tmp_int_list;
+    END IF;
+
+    OPEN core_cursor FOR EXECUTE param_query;
+
+    LOOP
+
+        FETCH core_cursor INTO core_result;
+        EXIT WHEN NOT FOUND;
+        EXIT WHEN total_count >= core_limit;
+
+        total_count := total_count + 1;
+
+        CONTINUE WHEN total_count NOT BETWEEN  core_offset + 1 AND check_limit + core_offset;
+
+        check_count := check_count + 1;
+
+        PERFORM 1 FROM biblio.record_entry b WHERE NOT b.deleted AND b.id IN ( SELECT * FROM unnest( core_result.records ) );
+        IF NOT FOUND THEN
+            -- RAISE NOTICE ' % were all deleted ... ', core_result.records;
+            deleted_count := deleted_count + 1;
+            CONTINUE;
+        END IF;
+
+        PERFORM 1
+          FROM  biblio.record_entry b
+                JOIN config.bib_source s ON (b.source = s.id)
+          WHERE s.transcendant
+                AND b.id IN ( SELECT * FROM unnest( core_result.records ) );
+
+        IF FOUND THEN
+            -- RAISE NOTICE ' % were all transcendant ... ', core_result.records;
+            visible_count := visible_count + 1;
+
+            current_res.id = core_result.id;
+            current_res.rel = core_result.rel;
+
+            tmp_int := 1;
+            IF metarecord THEN
+                SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
+            END IF;
+
+            IF tmp_int = 1 THEN
+                current_res.record = core_result.records[1];
+            ELSE
+                current_res.record = NULL;
+            END IF;
+
+            RETURN NEXT current_res;
+
+            CONTINUE;
+        END IF;
+
+        PERFORM 1
+          FROM  asset.call_number cn
+                JOIN asset.uri_call_number_map map ON (map.call_number = cn.id)
+                JOIN asset.uri uri ON (map.uri = uri.id)
+          WHERE NOT cn.deleted
+                AND cn.label = '##URI##'
+                AND uri.active
+                AND ( param_locations IS NULL OR array_upper(param_locations, 1) IS NULL )
+                AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
+                AND cn.owning_lib IN ( SELECT * FROM unnest( luri_org_list ) )
+          LIMIT 1;
+
+        IF FOUND THEN
+            -- RAISE NOTICE ' % have at least one URI ... ', core_result.records;
+            visible_count := visible_count + 1;
+
+            current_res.id = core_result.id;
+            current_res.rel = core_result.rel;
+
+            tmp_int := 1;
+            IF metarecord THEN
+                SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
+            END IF;
+
+            IF tmp_int = 1 THEN
+                current_res.record = core_result.records[1];
+            ELSE
+                current_res.record = NULL;
+            END IF;
+
+            RETURN NEXT current_res;
+
+            CONTINUE;
+        END IF;
+
+        IF param_statuses IS NOT NULL AND array_upper(param_statuses, 1) > 0 THEN
+
+            PERFORM 1
+              FROM  asset.call_number cn
+                    JOIN asset.copy cp ON (cp.call_number = cn.id)
+              WHERE NOT cn.deleted
+                    AND NOT cp.deleted
+                    AND cp.status IN ( SELECT * FROM unnest( param_statuses ) )
+                    AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
+                    AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
+              LIMIT 1;
+
+            IF NOT FOUND THEN
+                PERFORM 1
+                  FROM  biblio.peer_bib_copy_map pr
+                        JOIN asset.copy cp ON (cp.id = pr.target_copy)
+                  WHERE NOT cp.deleted
+                        AND cp.status IN ( SELECT * FROM unnest( param_statuses ) )
+                        AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
+                        AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
+                  LIMIT 1;
+
+                IF NOT FOUND THEN
+                -- RAISE NOTICE ' % and multi-home linked records were all status-excluded ... ', core_result.records;
+                    excluded_count := excluded_count + 1;
+                    CONTINUE;
+                END IF;
+            END IF;
+
+        END IF;
+
+        IF param_locations IS NOT NULL AND array_upper(param_locations, 1) > 0 THEN
+
+            PERFORM 1
+              FROM  asset.call_number cn
+                    JOIN asset.copy cp ON (cp.call_number = cn.id)
+              WHERE NOT cn.deleted
+                    AND NOT cp.deleted
+                    AND cp.location IN ( SELECT * FROM unnest( param_locations ) )
+                    AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
+                    AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
+              LIMIT 1;
+
+            IF NOT FOUND THEN
+                PERFORM 1
+                  FROM  biblio.peer_bib_copy_map pr
+                        JOIN asset.copy cp ON (cp.id = pr.target_copy)
+                  WHERE NOT cp.deleted
+                        AND cp.location IN ( SELECT * FROM unnest( param_locations ) )
+                        AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
+                        AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
+                  LIMIT 1;
+
+                IF NOT FOUND THEN
+                    -- RAISE NOTICE ' % and multi-home linked records were all copy_location-excluded ... ', core_result.records;
+                    excluded_count := excluded_count + 1;
+                    CONTINUE;
+                END IF;
+            END IF;
+
+        END IF;
+
+        IF staff IS NULL OR NOT staff THEN
+
+            PERFORM 1
+              FROM  asset.opac_visible_copies
+              WHERE circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
+                    AND record IN ( SELECT * FROM unnest( core_result.records ) )
+              LIMIT 1;
+
+            IF NOT FOUND THEN
+                PERFORM 1
+                  FROM  biblio.peer_bib_copy_map pr
+                        JOIN asset.opac_visible_copies cp ON (cp.copy_id = pr.target_copy)
+                  WHERE cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
+                        AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
+                  LIMIT 1;
+
+                IF NOT FOUND THEN
+
+                    -- RAISE NOTICE ' % and multi-home linked records were all visibility-excluded ... ', core_result.records;
+                    excluded_count := excluded_count + 1;
+                    CONTINUE;
+                END IF;
+            END IF;
+
+        ELSE
+
+            PERFORM 1
+              FROM  asset.call_number cn
+                    JOIN asset.copy cp ON (cp.call_number = cn.id)
+              WHERE NOT cn.deleted
+                    AND NOT cp.deleted
+                    AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
+                    AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
+              LIMIT 1;
+
+            IF NOT FOUND THEN
+
+                PERFORM 1
+                  FROM  biblio.peer_bib_copy_map pr
+                        JOIN asset.copy cp ON (cp.id = pr.target_copy)
+                  WHERE NOT cp.deleted
+                        AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
+                        AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
+                  LIMIT 1;
+
+                IF NOT FOUND THEN
+
+                    PERFORM 1
+                      FROM  asset.call_number cn
+                            JOIN asset.copy cp ON (cp.call_number = cn.id)
+                      WHERE cn.record IN ( SELECT * FROM unnest( core_result.records ) )
+                            AND NOT cp.deleted
+                      LIMIT 1;
+
+                    IF FOUND THEN
+                        -- RAISE NOTICE ' % and multi-home linked records were all visibility-excluded ... ', core_result.records;
+                        excluded_count := excluded_count + 1;
+                        CONTINUE;
+                    END IF;
+                END IF;
+
+            END IF;
+
+        END IF;
+
+        visible_count := visible_count + 1;
+
+        current_res.id = core_result.id;
+        current_res.rel = core_result.rel;
+
+        tmp_int := 1;
+        IF metarecord THEN
+            SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
+        END IF;
+
+        IF tmp_int = 1 THEN
+            current_res.record = core_result.records[1];
+        ELSE
+            current_res.record = NULL;
+        END IF;
+
+        RETURN NEXT current_res;
+
+        IF visible_count % 1000 = 0 THEN
+            -- RAISE NOTICE ' % visible so far ... ', visible_count;
+        END IF;
+
+    END LOOP;
+
+    current_res.id = NULL;
+    current_res.rel = NULL;
+    current_res.record = NULL;
+    current_res.total = total_count;
+    current_res.checked = check_count;
+    current_res.deleted = deleted_count;
+    current_res.visible = visible_count;
+    current_res.excluded = excluded_count;
+
+    CLOSE core_cursor;
+
+    RETURN NEXT current_res;
+
+END;
+$func$ LANGUAGE PLPGSQL;
+
 COMMIT;
 

commit 8b0139a80a70b436fbe8a1b2d5f445431d36d6aa
Author: Mike Rylander <mrylander at gmail.com>
Date:   Mon Mar 25 14:50:55 2013 -0400

    Revert "Remove dependence on search.query_parser_fts proc"
    
    This reverts commit 6d8872cf120caf67ad6f65995b2c5155fa5ab652.
    
    Conflicts:
    	Open-ILS/src/perlmods/lib/OpenILS/Application/Storage/Driver/Pg/QueryParser.pm
    
    Signed-off-by: Mike Rylander <mrylander at gmail.com>
    Signed-off-by: Jason Stephenson <jstephenson at mvlc.org>

diff --git a/Open-ILS/examples/opensrf.xml.example b/Open-ILS/examples/opensrf.xml.example
index 7e69ff4..658ced1 100644
--- a/Open-ILS/examples/opensrf.xml.example
+++ b/Open-ILS/examples/opensrf.xml.example
@@ -502,6 +502,22 @@ vim:et:ts=4:sw=4:
                     <use_staged_search>true</use_staged_search>
 
                     <!--
+                        For staged search, we estimate hits based on inclusion or exclusion.
+
+                        Valid settings:
+                            inclusion - visible ratio on superpage
+                            exclusion - excluded ratio on superpage
+                            delete_adjusted_inclusion - included ratio on superpage, ratio adjusted by deleted count
+                            delete_adjusted_exclusion - excluded ratio on superpage, ratio adjusted by deleted count
+
+                        Under normal circumstances, inclusion is the best strategy, and both delete_adjusted variants
+                        will return the same value +/- 1.  The exclusion strategy is the original, and works well
+                        when there are few deleted or excluded records, in other words, when the superpage is not
+                        sparsely populated with visible records.
+                    -->
+                    <estimation_strategy>inclusion</estimation_strategy>
+
+                    <!--
                         Evergreen uses a cover density algorithm for calculating relative ranking of matches.  There
                         are several tuning parameters and options available.  By default, no document length normalization
                         is applied.  From the Postgres documentation on ts_rank_cd() (the function used by Evergreen):
@@ -560,8 +576,11 @@ vim:et:ts=4:sw=4:
                     -->
                     <default_preferred_language_weight>5</default_preferred_language_weight>
 
-                    <!-- How many search results to return. Defaults to superpage_size * max_superpages, if they are defined and it isn't. -->
-                    <max_search_results>10000</max_search_results>
+                    <!-- Baseline number of records to check for hit estimation. -->
+                    <superpage_size>1000</superpage_size>
+
+                    <!-- How many superpages to consider for searching overall. -->
+                    <max_superpages>10</max_superpages>
 
                     <!-- zip code database file -->
                     <!--<zips_file>LOCALSTATEDIR/data/zips.txt</zips_file>-->
diff --git a/Open-ILS/src/perlmods/lib/OpenILS/Application/Search/Biblio.pm b/Open-ILS/src/perlmods/lib/OpenILS/Application/Search/Biblio.pm
index 434e326..6d149ca 100644
--- a/Open-ILS/src/perlmods/lib/OpenILS/Application/Search/Biblio.pm
+++ b/Open-ILS/src/perlmods/lib/OpenILS/Application/Search/Biblio.pm
@@ -36,7 +36,8 @@ my $pfx = "open-ils.search_";
 
 my $cache;
 my $cache_timeout;
-my $max_search_results;
+my $superpage_size;
+my $max_superpages;
 
 sub initialize {
 	$cache = OpenSRF::Utils::Cache->new('global');
@@ -44,17 +45,14 @@ sub initialize {
 	$cache_timeout = $sclient->config_value(
 			"apps", "open-ils.search", "app_settings", "cache_timeout" ) || 300;
 
-	my $superpage_size = $sclient->config_value(
+	$superpage_size = $sclient->config_value(
 			"apps", "open-ils.search", "app_settings", "superpage_size" ) || 500;
 
-	my $max_superpages = $sclient->config_value(
+	$max_superpages = $sclient->config_value(
 			"apps", "open-ils.search", "app_settings", "max_superpages" ) || 20;
 
-    $max_search_results = $sclient->config_value(
-            "apps", "open-ils.search", "app_settings", "max_search_results" ) || ($superpage_size * $max_superpages);
-
 	$logger->info("Search cache timeout is $cache_timeout, ".
-        " max_search_results is $max_search_results");
+        " superpage_size is $superpage_size, max_superpages is $max_superpages");
 }
 
 
@@ -1271,8 +1269,24 @@ sub staged_search {
     $user_offset = ($user_offset >= 0) ? $user_offset :  0;
     $user_limit  = ($user_limit  >= 0) ? $user_limit  : 10;
 
-    # restrict DB query to our max results
-    $search_hash->{core_limit}  = $max_search_results;
+
+    # we're grabbing results on a per-superpage basis, which means the 
+    # limit and offset should coincide with superpage boundaries
+    $search_hash->{offset} = 0;
+    $search_hash->{limit} = $superpage_size;
+
+    # force a well-known check_limit
+    $search_hash->{check_limit} = $superpage_size; 
+    # restrict total tested to superpage size * number of superpages
+    $search_hash->{core_limit}  = $superpage_size * $max_superpages;
+
+    # Set the configured estimation strategy, defaults to 'inclusion'.
+	my $estimation_strategy = OpenSRF::Utils::SettingsClient
+        ->new
+        ->config_value(
+            apps => 'open-ils.search', app_settings => 'estimation_strategy'
+        ) || 'inclusion';
+	$search_hash->{estimation_strategy} = $estimation_strategy;
 
     # pull any existing results from the cache
     my $key = search_cache_key($method, $search_hash);
@@ -1282,67 +1296,126 @@ sub staged_search {
     # keep retrieving results until we find enough to 
     # fulfill the user-specified limit and offset
     my $all_results = [];
-
-    my $results;
-    my $summary;
+    my $page; # current superpage
+    my $est_hit_count = 0;
+    my $current_page_summary = {};
+    my $global_summary = {checked => 0, visible => 0, excluded => 0, deleted => 0, total => 0};
+    my $is_real_hit_count = 0;
     my $new_ids = [];
 
-    if($cache_data->{summary}) {
-        # this window of results is already cached
-        $logger->debug("staged search: found cached results");
-        $summary = $cache_data->{summary};
-        $results = $cache_data->{results};
+    for($page = 0; $page < $max_superpages; $page++) {
 
-    } else {
-        # retrieve the window of results from the database
-        $logger->debug("staged search: fetching results from the database");
-        my $start = time;
-        $results = $U->storagereq($method, %$search_hash);
-        $search_duration = time - $start;
-        $summary = shift(@$results) if $results;
-
-        unless($summary) {
-            $logger->info("search timed out: duration=$search_duration: params=".
-                OpenSRF::Utils::JSON->perl2JSON($search_hash));
-            return {count => 0};
-        }
+        my $data = $cache_data->{$page};
+        my $results;
+        my $summary;
 
-        $logger->info("staged search: DB call took $search_duration seconds and returned ".scalar(@$results)." rows, including summary");
+        $logger->debug("staged search: analyzing superpage $page");
 
-        my $hc = $summary->{visible};
-        if($hc == 0) {
-            $logger->info("search returned 0 results: duration=$search_duration: params=".
-                OpenSRF::Utils::JSON->perl2JSON($search_hash));
-        }
+        if($data) {
+            # this window of results is already cached
+            $logger->debug("staged search: found cached results");
+            $summary = $data->{summary};
+            $results = $data->{results};
 
-        # Create backwards-compatible result structures
-        if($IAmMetabib) {
-            $results = [map {[$_->{id}, $_->{rel}, $_->{record}]} @$results];
         } else {
-            $results = [map {[$_->{id}]} @$results];
+            # retrieve the window of results from the database
+            $logger->debug("staged search: fetching results from the database");
+            $search_hash->{skip_check} = $page * $superpage_size;
+            my $start = time;
+            $results = $U->storagereq($method, %$search_hash);
+            $search_duration = time - $start;
+            $summary = shift(@$results) if $results;
+
+            unless($summary) {
+                $logger->info("search timed out: duration=$search_duration: params=".
+                    OpenSRF::Utils::JSON->perl2JSON($search_hash));
+                return {count => 0};
+            }
+
+            $logger->info("staged search: DB call took $search_duration seconds and returned ".scalar(@$results)." rows, including summary");
+
+            my $hc = $summary->{estimated_hit_count} || $summary->{visible};
+            if($hc == 0) {
+                $logger->info("search returned 0 results: duration=$search_duration: params=".
+                    OpenSRF::Utils::JSON->perl2JSON($search_hash));
+            }
+
+            # Create backwards-compatible result structures
+            if($IAmMetabib) {
+                $results = [map {[$_->{id}, $_->{rel}, $_->{record}]} @$results];
+            } else {
+                $results = [map {[$_->{id}]} @$results];
+            }
+
+            push @$new_ids, grep {defined($_)} map {$_->[0]} @$results;
+            $results = [grep {defined $_->[0]} @$results];
+            cache_staged_search_page($key, $page, $summary, $results) if $docache;
         }
 
-        push @$new_ids, grep {defined($_)} map {$_->[0]} @$results;
-        $results = [grep {defined $_->[0]} @$results];
-        cache_staged_search($key, $summary, $results) if $docache;
-    }
+        tag_circulated_records($search_hash->{authtoken}, $results, $IAmMetabib) 
+            if $search_hash->{tag_circulated_records} and $search_hash->{authtoken};
+
+        $current_page_summary = $summary;
+
+        # add the new set of results to the set under construction
+        push(@$all_results, @$results);
+
+        my $current_count = scalar(@$all_results);
 
-    tag_circulated_records($search_hash->{authtoken}, $results, $IAmMetabib) 
-        if $search_hash->{tag_circulated_records} and $search_hash->{authtoken};
+        $est_hit_count = $summary->{estimated_hit_count} || $summary->{visible}
+            if $page == 0;
+
+        $logger->debug("staged search: located $current_count, with estimated hits=".
+            $summary->{estimated_hit_count}." : visible=".$summary->{visible}.", checked=".$summary->{checked});
+
+		if (defined($summary->{estimated_hit_count})) {
+            foreach (qw/ checked visible excluded deleted /) {
+                $global_summary->{$_} += $summary->{$_};
+            }
+			$global_summary->{total} = $summary->{total};
+		}
 
-    # add the new set of results to the set under construction
-    push(@$all_results, @$results);
+        # we've found all the possible hits
+        last if $current_count == $summary->{visible}
+            and not defined $summary->{estimated_hit_count};
 
-    my $current_count = scalar(@$all_results);
+        # we've found enough results to satisfy the requested limit/offset
+        last if $current_count >= ($user_limit + $user_offset);
 
-    $logger->debug("staged search: located $current_count, visible=".$summary->{visible});
+        # we've scanned all possible hits
+        if($summary->{checked} < $superpage_size) {
+            $est_hit_count = scalar(@$all_results);
+            # we have all possible results in hand, so we know the final hit count
+            $is_real_hit_count = 1;
+            last;
+        }
+    }
 
     my @results = grep {defined $_} @$all_results[$user_offset..($user_offset + $user_limit - 1)];
 
+	# refine the estimate if we have more than one superpage
+	if ($page > 0 and not $is_real_hit_count) {
+		if ($global_summary->{checked} >= $global_summary->{total}) {
+			$est_hit_count = $global_summary->{visible};
+		} else {
+			my $updated_hit_count = $U->storagereq(
+				'open-ils.storage.fts_paging_estimate',
+				$global_summary->{checked},
+				$global_summary->{visible},
+				$global_summary->{excluded},
+				$global_summary->{deleted},
+				$global_summary->{total}
+			);
+			$est_hit_count = $updated_hit_count->{$estimation_strategy};
+		}
+	}
+
     $conn->respond_complete(
         {
-            count             => $summary->{visible},
+            count             => $est_hit_count,
             core_limit        => $search_hash->{core_limit},
+            superpage_size    => $search_hash->{check_limit},
+            superpage_summary => $current_page_summary,
             facet_key         => $facet_key,
             ids               => \@results
         }
@@ -1509,15 +1582,18 @@ sub cache_facets {
     $cache->put_cache($key, $data, $cache_timeout);
 }
 
-sub cache_staged_search {
+sub cache_staged_search_page {
     # puts this set of results into the cache
-    my($key, $summary, $results) = @_;
-    my $data =  {
+    my($key, $page, $summary, $results) = @_;
+    my $data = $cache->get_cache($key);
+    $data ||= {};
+    $data->{$page} = {
         summary => $summary,
         results => $results
     };
 
-    $logger->info("staged search: cached with key=$key, visible=".$summary->{visible});
+    $logger->info("staged search: cached with key=$key, superpage=$page, estimated=".
+        $summary->{estimated_hit_count}.", visible=".$summary->{visible});
 
     $cache->put_cache($key, $data, $cache_timeout);
 }
diff --git a/Open-ILS/src/perlmods/lib/OpenILS/Application/Storage/Driver/Pg/QueryParser.pm b/Open-ILS/src/perlmods/lib/OpenILS/Application/Storage/Driver/Pg/QueryParser.pm
index 3fa1330..26aa733 100644
--- a/Open-ILS/src/perlmods/lib/OpenILS/Application/Storage/Driver/Pg/QueryParser.pm
+++ b/Open-ILS/src/perlmods/lib/OpenILS/Application/Storage/Driver/Pg/QueryParser.pm
@@ -45,24 +45,6 @@ sub filter_group_entry_callback {
     );
 }
 
-sub location_groups_callback {
-    my ($invocant, $self, $struct, $filter, $params, $negate) = @_;
-
-    return sprintf(' %slocations(%s)',
-        $negate ? '-' : '',
-        join(
-            ',',
-            map {
-                $_->location
-            } @{
-                OpenILS::Utils::CStoreEditor
-                    ->new
-                    ->search_asset_copy_location_group_map({ lgroup => $params })
-            }
-        )
-    );
-}
-
 sub format_callback {
     my ($invocant, $self, $struct, $filter, $params, $negate) = @_;
 
@@ -694,9 +676,8 @@ use OpenSRF::Utils::Logger qw($logger);
 use OpenSRF::Utils qw/:datetime/;
 use Data::Dumper;
 use OpenILS::Application::AppUtils;
-use OpenILS::Utils::CStoreEditor;
 my $apputils = "OpenILS::Application::AppUtils";
-my $editor = OpenILS::Utils::CStoreEditor->new;
+
 
 sub toSQL {
     my $self = shift;
@@ -711,7 +692,6 @@ sub toSQL {
             $filters{$col} = $filter->args->[0];
         }
     }
-    $self->new_filter( statuses => [0,7,12] ) if ($self->find_modifier('available'));
 
     $self->QueryParser->superpage($filters{superpage}) if ($filters{superpage});
     $self->QueryParser->superpage_size($filters{superpage_size}) if ($filters{superpage_size});
@@ -746,6 +726,8 @@ sub toSQL {
     }
     $rel = "1.0/($rel)::NUMERIC";
 
+    my $mra_join = 'INNER JOIN metabib.record_attr mrd ON m.source = mrd.id';
+    
     my $rank = $rel;
 
     my $desc = 'ASC';
@@ -775,220 +757,29 @@ sub toSQL {
     if ($flat_where ne '') {
         $flat_where = "AND (\n" . ${spc} x 5 . $flat_where . "\n" . ${spc} x 4 . ")";
     }
+    my $with = $$flat_plan{with};
+    $with= "\nWITH $with" if $with;
 
-    my $site = $self->find_filter('site');
-    if ($site && $site->args) {
-        $site = $site->args->[0];
-        if ($site && $site !~ /^(-)?\d+$/) {
-            my $search = $editor->search_actor_org_unit({ shortname => $site });
-            $site = @$search[0]->id if($search && @$search);
-            $site = undef unless ($search);
-        }
-    } else {
-        $site = undef;
-    }
-    my $lasso = $self->find_filter('lasso');
-    if ($lasso && $lasso->args) {
-        $lasso = $lasso->args->[0];
-        if ($lasso && $lasso !~ /^\d+$/) {
-            my $search = $editor->search_actor_org_lasso({ name => $lasso });
-            $lasso = @$search[0]->id if($search && @$search);
-            $lasso = undef unless ($search);
-        }
-    } else {
-        $lasso = undef;
-    }
-    my $depth = $self->find_filter('depth');
-    if ($depth && $depth->args) {
-        $depth = $depth->args->[0];
-        if ($depth && $depth !~ /^\d+$/) {
-            # This *is* what metabib.pm has been doing....but it makes no sense to me. :/
-            # Should this be looking up the depth of the OU type on the OU in question?
-            my $search = $editor->search_actor_org_unit([{ name => $depth },{ opac_label => $depth }]);
-            $depth = @$search[0]->id if($search && @$search);
-            $depth = undef unless($search);
-        }
-    } else {
-        $depth = undef;
-    }
-    my $pref_ou = $self->find_filter('pref_ou');
-    if ($pref_ou && $pref_ou->args) {
-        $pref_ou = $pref_ou->args->[0];
-        if ($pref_ou && $pref_ou !~ /^(-)?\d+$/) {
-            my $search = $editor->search_actor_org_unit({ shortname => $pref_ou });
-            $pref_ou = @$search[0]->id if($search && @$search);
-            $pref_ou = undef unless ($search);
-        }
-    } else {
-        $pref_ou = undef;
-    }
-
-    # Supposedly at some point a site of 0 and a depth will equal user lasso id.
-    # We need OU buckets before that happens. 'my_lasso' is, I believe, the target filter for it.
-
-    $site = -$lasso if ($lasso);
-
-    # Default to the top of the org tree if we have nothing else. This would need tweaking for the user lasso bit.
-    if (!$site) {
-        my $search = $editor->search_actor_org_unit({ parent_ou => undef });
-        $site = @$search[0]->id if ($search);
-    }
-
-    my $depth_check = '';
-    $depth_check = ", $depth" if ($depth);
-
-    my $with = '';
-    $with .= "     search_org_list AS (\n";
-    if ($site < 0) {
-        # Lasso!
-        $lasso = -$site;
-        $with .= "       SELECT DISTINCT org_unit from actor.org_lasso_map WHERE lasso = $lasso\n";
-    } elsif ($site > 0) {
-        $with .= "       SELECT DISTINCT id FROM actor.org_unit_descendants($site$depth_check)\n";
-    } else {
-        # Placeholder for user lasso stuff.
-    }
-    $with .= "     ),\n";
-    $with .= "     luri_org_list AS (\n";
-    if ($site < 0) {
-        # We can re-use the lasso var, we already updated it above.
-        $with .= "       SELECT DISTINCT (actor.org_unit_ancestors(org_unit)).id from actor.org_lasso_map WHERE lasso = $lasso\n";
-    } elsif ($site > 0) {
-        $with .= "       SELECT DISTINCT id FROM actor.org_unit_ancestors($site)\n";
-    } else {
-        # Placeholder for user lasso stuff.
-    }
-    if ($pref_ou) {
-        $with .= "       UNION\n";
-        $with .= "       SELECT DISTINCT id FROM actor.org_unit_ancestors($pref_ou)\n";
-    }
-    $with .= "     )";
-    $with .= ",\n     " . $$flat_plan{with} if ($$flat_plan{with});
-
-    # Limit stuff
-    my $limit_where = <<"    SQL";
--- Filter records based on visibility
-        AND NOT bre.deleted
-        AND (
-            cbs.transcendant IS TRUE
-            OR
-    SQL
-
-    if ($self->find_modifier('deleted')) {
-        $limit_where = <<"        SQL";
-            AND bre.deleted
-        SQL
-    } elsif ($self->find_modifier('staff')) {
-        $limit_where .= <<"        SQL";
-            EXISTS(
-                SELECT 1 FROM asset.call_number cn
-                    JOIN asset.copy cp ON (cp.call_number = cn.id)
-                WHERE NOT cn.deleted
-                    AND NOT cp.deleted
-                    AND cp.circ_lib IN ( SELECT * FROM search_org_list )
-                    AND cn.record = m.source
-                LIMIT 1
-            )
-            OR
-            EXISTS(
-                SELECT 1 FROM biblio.peer_bib_copy_map pr
-                    JOIN asset.copy cp ON (cp.id = pr.target_copy)
-                WHERE NOT cp.deleted
-                    AND cp.circ_lib IN ( SELECT * FROM search_org_list )
-                    AND pr.peer_record = m.source
-                LIMIT 1
-            )
-            OR (
-                NOT EXISTS(
-                    SELECT 1 FROM asset.call_number cn
-                        JOIN asset.copy cp ON (cp.call_number = cn.id)
-                    WHERE cn.record = m.source
-                        AND NOT cp.deleted
-                    LIMIT 1
-                )
-                AND
-                NOT EXISTS(
-                    SELECT 1 FROM biblio.peer_bib_copy_map pr
-                        JOIN asset.copy cp ON (cp.id = pr.target_copy)
-                    WHERE NOT cp.deleted
-                        AND pr.peer_record = m.source
-                    LIMIT 1
-                )
-                AND
-                NOT EXISTS(
-                    SELECT 1 FROM asset.call_number acn
-                        JOIN asset.uri_call_number_map aucnm ON acn.id = aucnm.call_number
-                        JOIN asset.uri uri ON aucnm.uri = uri.id
-                    WHERE NOT acn.deleted
-                        AND uri.active
-                        AND acn.record = m.source
-                    LIMIT 1
-                )
-            )
-            OR
-            EXISTS(
-                SELECT 1 FROM asset.call_number acn
-                    JOIN asset.uri_call_number_map aucnm ON acn.id = aucnm.call_number
-                    JOIN asset.uri uri ON aucnm.uri = uri.id
-                WHERE NOT acn.deleted AND uri.active AND acn.record = m.source AND acn.owning_lib IN (
-                    SELECT * FROM luri_org_list
-                )
-                LIMIT 1
-            )
-        )
-        SQL
-    } else {
-        $limit_where .= <<"        SQL";
-            EXISTS(
-                SELECT 1 FROM asset.opac_visible_copies
-                WHERE circ_lib IN ( SELECT * FROM search_org_list )
-                    AND record = m.source
-                LIMIT 1
-            )
-            OR
-            EXISTS(
-                SELECT 1 FROM biblio.peer_bib_copy_map pr
-                    JOIN asset.opac_visible_copies cp ON (cp.copy_id = pr.target_copy)
-                WHERE cp.circ_lib IN ( SELECT * FROM search_org_list )
-                    AND pr.peer_record = m.source
-                LIMIT 1
-            )
-            OR
-            EXISTS(
-                SELECT 1 FROM asset.call_number acn
-                    JOIN asset.uri_call_number_map aucnm ON acn.id = aucnm.call_number
-                    JOIN asset.uri uri ON aucnm.uri = uri.id
-                WHERE NOT acn.deleted AND uri.active AND acn.record = m.source AND acn.owning_lib IN (
-                    SELECT * FROM luri_org_list
-                )
-                LIMIT 1
-            )
-        )
-        SQL
-    }
-    # For single records we want the record id
-    # For metarecords we want NULL or the only record ID.
-    my $agg_record = 'm.source AS record';
+    # Need an array for query parser db function; this gives a better plan
+    # than the ARRAY_AGG(DISTINCT m.source) option as of PostgreSQL 9.1
+    my $agg_records = 'ARRAY[m.source] AS records';
     if ($key =~ /metarecord/) {
-        $agg_record = 'CASE WHEN COUNT(DISTINCT m.source) = 1 THEN FIRST(m.source) ELSE NULL END AS record';
+        # metarecord searches still require the ARRAY_AGG approach
+        $agg_records = 'ARRAY_AGG(DISTINCT m.source) AS records';
     }
 
     my $sql = <<SQL;
-WITH
 $with
 SELECT  $key AS id,
-        $agg_record,
+        $agg_records,
         $rel AS rel,
         $rank AS rank, 
         FIRST(mrd.attrs->'date1') AS tie_break
   FROM  metabib.metarecord_source_map m
         $$flat_plan{from}
-        INNER JOIN metabib.record_attr mrd ON m.source = mrd.id
-        INNER JOIN biblio.record_entry bre ON m.source = bre.id
-        LEFT JOIN config.bib_source cbs ON bre.source = cbs.id
+        $mra_join
   WHERE 1=1
         $flat_where
-        $limit_where
   GROUP BY 1
   ORDER BY 4 $desc $nullpos, 5 DESC $nullpos, 3 DESC
   LIMIT $core_limit
@@ -1126,7 +917,7 @@ sub flatten {
                       . ${spc} x 2 ."AND ${talias}.field IN (". join(',', @field_ids) . ")\n"
                       . "${spc})";
 
-                if ($join_type ne 'INNER') {
+                if ($join_type != 'INNER') {
                     my $NOT = $node->negate ? '' : ' NOT';
                     $where .= "${talias}.id IS$NOT NULL";
                 } elsif ($where ne '') {
@@ -1322,60 +1113,6 @@ sub flatten {
                         $where .= "bre.$datefilter BETWEEN \$_$$\$$cstart\$_$$\$ AND \$_$$\$$cend\$_$$\$";
                     }
                 }
-            } elsif ($filter->name eq 'locations') {
-                if (@{$filter->args} > 0) {
-                    my $spcdepth = $self->plan_level + 5;
-                    $where .= $joiner if $where ne '';
-                    $where .= "(\n"
-                           . ${spc} x ($spcdepth + 1) . "${NOT}EXISTS(\n"
-                           . ${spc} x ($spcdepth + 2) . "SELECT 1 FROM asset.call_number acn\n"
-                           . ${spc} x ($spcdepth + 5) . "JOIN asset.copy acp ON acn.id = acp.call_number\n"
-                           . ${spc} x ($spcdepth + 2) . "WHERE m.source = acn.record\n"
-                           . ${spc} x ($spcdepth + 5) . "AND acp.circ_lib IN (SELECT * FROM search_org_list)\n"
-                           . ${spc} x ($spcdepth + 5) . "AND NOT acn.deleted\n"
-                           . ${spc} x ($spcdepth + 5) . "AND NOT acp.deleted\n"
-                           . ${spc} x ($spcdepth + 5) . "AND acp.location IN (" . join(',', map { $self->QueryParser->quote_value($_) } @{ $filter->args }) . ")\n"
-                           . ${spc} x ($spcdepth + 2) . "LIMIT 1\n"
-                           . ${spc} x ($spcdepth + 1) . ")\n"
-                           . ${spc} x ($spcdepth + 1) . ($filter->negate ? 'AND' : 'OR') . "\n"
-                           . ${spc} x ($spcdepth + 1) . "${NOT}EXISTS(\n"
-                           . ${spc} x ($spcdepth + 2) . "SELECT 1 FROM biblio.peer_bib_copy_map pr\n"
-                           . ${spc} x ($spcdepth + 5) . "JOIN asset.copy acp ON pr.target_copy = acp.id\n"
-                           . ${spc} x ($spcdepth + 2) . "WHERE m.source = pr.peer_record\n"
-                           . ${spc} x ($spcdepth + 5) . "AND acp.circ_lib IN (SELECT * FROM search_org_list)\n"
-                           . ${spc} x ($spcdepth + 5) . "AND NOT acp.deleted\n"
-                           . ${spc} x ($spcdepth + 5) . "AND acp.location IN (" . join(',', map { $self->QueryParser->quote_value($_) } @{ $filter->args }) . ")\n"
-                           . ${spc} x ($spcdepth + 2) . "LIMIT 1\n"
-                           . ${spc} x ($spcdepth + 1) . ")\n"
-                           . ${spc} x $spcdepth . ")";
-                }
-            } elsif ($filter->name eq 'statuses') {
-                if (@{$filter->args} > 0) {
-                    my $spcdepth = $self->plan_level + 5;
-                    $where .= $joiner if $where ne '';
-                    $where .= "(\n"
-                           . ${spc} x ($spcdepth + 1) . "${NOT}EXISTS(\n"
-                           . ${spc} x ($spcdepth + 2) . "SELECT 1 FROM asset.call_number acn\n"
-                           . ${spc} x ($spcdepth + 5) . "JOIN asset.copy acp ON acn.id = acp.call_number\n"
-                           . ${spc} x ($spcdepth + 2) . "WHERE m.source = acn.record\n"
-                           . ${spc} x ($spcdepth + 5) . "AND acp.circ_lib IN (SELECT * FROM search_org_list)\n"
-                           . ${spc} x ($spcdepth + 5) . "AND NOT acn.deleted\n"
-                           . ${spc} x ($spcdepth + 5) . "AND NOT acp.deleted\n"
-                           . ${spc} x ($spcdepth + 5) . "AND acp.status IN (" . join(',', map { $self->QueryParser->quote_value($_) } @{ $filter->args }) . ")\n"
-                           . ${spc} x ($spcdepth + 2) . "LIMIT 1\n"
-                           . ${spc} x ($spcdepth + 1) . ")\n"
-                           . ${spc} x ($spcdepth + 1) . ($filter->negate ? 'AND' : 'OR') . "\n"
-                           . ${spc} x ($spcdepth + 1) . "${NOT}EXISTS(\n"
-                           . ${spc} x ($spcdepth + 2) . "SELECT 1 FROM biblio.peer_bib_copy_map pr\n"
-                           . ${spc} x ($spcdepth + 5) . "JOIN asset.copy acp ON pr.target_copy = acp.id\n"
-                           . ${spc} x ($spcdepth + 2) . "WHERE m.source = pr.peer_record\n"
-                           . ${spc} x ($spcdepth + 5) . "AND acp.circ_lib IN (SELECT * FROM search_org_list)\n"
-                           . ${spc} x ($spcdepth + 5) . "AND NOT acp.deleted\n"
-                           . ${spc} x ($spcdepth + 5) . "AND acp.status IN (" . join(',', map { $self->QueryParser->quote_value($_) } @{ $filter->args }) . ")\n"
-                           . ${spc} x ($spcdepth + 2) . "LIMIT 1\n"
-                           . ${spc} x ($spcdepth + 1) . ")\n"
-                           . ${spc} x $spcdepth . ")";
-                }
             } elsif ($filter->name eq 'bib_source') {
                 if (@{$filter->args} > 0) {
                     $where .= $joiner if $where ne '';
diff --git a/Open-ILS/src/perlmods/lib/OpenILS/Application/Storage/Publisher/metabib.pm b/Open-ILS/src/perlmods/lib/OpenILS/Application/Storage/Publisher/metabib.pm
index 96a70cf..423d5d8 100644
--- a/Open-ILS/src/perlmods/lib/OpenILS/Application/Storage/Publisher/metabib.pm
+++ b/Open-ILS/src/perlmods/lib/OpenILS/Application/Storage/Publisher/metabib.pm
@@ -2944,7 +2944,7 @@ sub query_parser_fts {
 
 
     # parse the query and supply any query-level %arg-based defaults
-    # we expect, and make use of, query, debug and core_limit args
+    # we expect, and make use of, query, superpage, superpage_size, debug and core_limit args
     my $query = $parser->new( %args )->parse;
 
     my $config = OpenSRF::Utils::SettingsClient->new();
@@ -2988,15 +2988,194 @@ sub query_parser_fts {
         }
     }
 
-	my $sth = metabib::metarecord_source_map->db_Main->prepare($query->parse_tree->toSQL);
+    # gather the site, if one is specified, defaulting to the in-query version
+	my $ou = $args{org_unit};
+	if (my ($filter) = $query->parse_tree->find_filter('site')) {
+            $ou = $filter->args->[0] if (@{$filter->args});
+    }
+    $ou = actor::org_unit->search( { shortname => $ou } )->next->id if ($ou and $ou !~ /^(-)?\d+$/);
+
+    # gather lasso, as with $ou
+	my $lasso = $args{lasso};
+	if (my ($filter) = $query->parse_tree->find_filter('lasso')) {
+            $lasso = $filter->args->[0] if (@{$filter->args});
+    }
+	$lasso = actor::org_lasso->search( { name => $lasso } )->next->id if ($lasso and $lasso !~ /^\d+$/);
+    $lasso = -$lasso if ($lasso);
+
+
+#    # XXX once we have org_unit containers, we can make user-defined lassos .. WHEEE
+#    # gather user lasso, as with $ou and lasso
+#    my $mylasso = $args{my_lasso};
+#    if (my ($filter) = $query->parse_tree->find_filter('my_lasso')) {
+#            $mylasso = $filter->args->[0] if (@{$filter->args});
+#    }
+#    $mylasso = actor::org_unit->search( { name => $mylasso } )->next->id if ($mylasso and $mylasso !~ /^\d+$/);
+
+
+    # if we have a lasso, go with that, otherwise ... ou
+    $ou = $lasso if ($lasso);
+
+    # gather the preferred OU, if one is specified, as with $ou
+    my $pref_ou = $args{pref_ou};
+	$log->info("pref_ou = $pref_ou");
+	if (my ($filter) = $query->parse_tree->find_filter('pref_ou')) {
+            $pref_ou = $filter->args->[0] if (@{$filter->args});
+    }
+    $pref_ou = actor::org_unit->search( { shortname => $pref_ou } )->next->id if ($pref_ou and $pref_ou !~ /^(-)?\d+$/);
+
+    # get the default $ou if we have nothing
+	$ou = actor::org_unit->search( { parent_ou => undef } )->next->id if (!$ou and !$lasso and !$mylasso);
+
+
+    # XXX when user lassos are here, check to make sure we don't have one -- it'll be passed in the depth, with an ou of 0
+    # gather the depth, if one is specified, defaulting to the in-query version
+	my $depth = $args{depth};
+	if (my ($filter) = $query->parse_tree->find_filter('depth')) {
+            $depth = $filter->args->[0] if (@{$filter->args});
+    }
+	$depth = actor::org_unit->search_where( [{ name => $depth },{ opac_label => $depth }], {limit => 1} )->next->id if ($depth and $depth !~ /^\d+$/);
+
+
+    # gather the limit or default to 10
+	my $limit = $args{check_limit} || 'NULL';
+	if (my ($filter) = $query->parse_tree->find_filter('limit')) {
+            $limit = $filter->args->[0] if (@{$filter->args});
+    }
+	if (my ($filter) = $query->parse_tree->find_filter('check_limit')) {
+            $limit = $filter->args->[0] if (@{$filter->args});
+    }
+
+
+    # gather the offset or default to 0
+	my $offset = $args{skip_check} || $args{offset} || 0;
+	if (my ($filter) = $query->parse_tree->find_filter('offset')) {
+            $offset = $filter->args->[0] if (@{$filter->args});
+    }
+	if (my ($filter) = $query->parse_tree->find_filter('skip_check')) {
+            $offset = $filter->args->[0] if (@{$filter->args});
+    }
+
+
+    # gather the estimation strategy or default to inclusion
+    my $estimation_strategy = $args{estimation_strategy} || 'inclusion';
+	if (my ($filter) = $query->parse_tree->find_filter('estimation_strategy')) {
+            $estimation_strategy = $filter->args->[0] if (@{$filter->args});
+    }
+
+
+    # gather the core limit, if one is specified
+    my $core_limit = $args{core_limit};
+	if (my ($filter) = $query->parse_tree->find_filter('core_limit')) {
+            $core_limit = $filter->args->[0] if (@{$filter->args});
+    }
+
+
+    # gather statuses, and then forget those if we have an #available modifier
+    my @statuses;
+    if (my ($filter) = $query->parse_tree->find_filter('statuses')) {
+        @statuses = @{$filter->args} if (@{$filter->args});
+    }
+    @statuses = (0,7,12) if ($query->parse_tree->find_modifier('available'));
+
+
+    # gather locations
+    my @location;
+    if (my ($filter) = $query->parse_tree->find_filter('locations')) {
+        @location = @{$filter->args} if (@{$filter->args});
+    }
+
+    # gather location_groups
+    if (my ($filter) = $query->parse_tree->find_filter('location_groups')) {
+        my @loc_groups = @{$filter->args} if (@{$filter->args});
+        
+        # collect the mapped locations and add them to the locations() filter
+        if (@loc_groups) {
+
+            my $cstore = OpenSRF::AppSession->create( 'open-ils.cstore' );
+            my $maps = $cstore->request(
+                'open-ils.cstore.direct.asset.copy_location_group_map.search.atomic',
+                {lgroup => \@loc_groups})->gather(1);
+
+            push(@location, $_->location) for @$maps;
+        }
+    }
+
+
+    my $param_check = $limit || $query->superpage_size || 'NULL';
+    my $param_offset = $offset || 'NULL';
+    my $param_limit = $core_limit || 'NULL';
+
+    my $sp = $query->superpage || 1;
+    if ($sp > 1) {
+        $param_offset = ($sp - 1) * $sp_size;
+    }
+
+	my $param_search_ou = $ou;
+	my $param_depth = $depth; $param_depth = 'NULL' unless (defined($depth) and length($depth) > 0 );
+	my $param_core_query = "\$core_query_$$\$" . $query->parse_tree->toSQL . "\$core_query_$$\$";
+	my $param_statuses = '$${' . join(',', map { s/\$//go; "\"$_\""} @statuses) . '}$$';
+	my $param_locations = '$${' . join(',', map { s/\$//go; "\"$_\""} @location) . '}$$';
+	my $staff = ($self->api_name =~ /staff/ or $query->parse_tree->find_modifier('staff')) ? "'t'" : "'f'";
+	my $metarecord = ($self->api_name =~ /metabib/ or $query->parse_tree->find_modifier('metabib') or $query->parse_tree->find_modifier('metarecord')) ? "'t'" : "'f'";
+	my $param_pref_ou = $pref_ou || 'NULL';
+
+	my $sth = metabib::metarecord_source_map->db_Main->prepare(<<"    SQL");
+        SELECT  * -- bib search: $args{query}
+          FROM  search.query_parser_fts(
+                    $param_search_ou\:\:INT,
+                    $param_depth\:\:INT,
+                    $param_core_query\:\:TEXT,
+                    $param_statuses\:\:INT[],
+                    $param_locations\:\:INT[],
+                    $param_offset\:\:INT,
+                    $param_check\:\:INT,
+                    $param_limit\:\:INT,
+                    $metarecord\:\:BOOL,
+                    $staff\:\:BOOL,
+                    $param_pref_ou\:\:INT
+                );
+    SQL
+
     $sth->execute;
 
     my $recs = $sth->fetchall_arrayref({});
-	$log->debug("Search yielded ".scalar(@$recs)." checked, visible results.",DEBUG);
+    my $summary_row = pop @$recs;
 
-    $client->respond({visible => scalar(@$recs)});
+    my $total    = $$summary_row{total};
+    my $checked  = $$summary_row{checked};
+    my $visible  = $$summary_row{visible};
+    my $deleted  = $$summary_row{deleted};
+    my $excluded = $$summary_row{excluded};
+
+    my $estimate = $visible;
+    if ( $total > $checked && $checked ) {
+
+        $$summary_row{hit_estimate} = FTS_paging_estimate($self, $client, $checked, $visible, $excluded, $deleted, $total);
+        $estimate = $$summary_row{estimated_hit_count} = $$summary_row{hit_estimate}{$estimation_strategy};
+
+    }
+
+    delete $$summary_row{id};
+    delete $$summary_row{rel};
+    delete $$summary_row{record};
+
+    if (defined($simple_plan)) {
+        $$summary_row{complex_query} = $simple_plan ? 0 : 1;
+    } else {
+        $$summary_row{complex_query} = $query->simple_plan ? 0 : 1;
+    }
+
+    $client->respond( $summary_row );
+
+	$log->debug("Search yielded ".scalar(@$recs)." checked, visible results with an approximate visible total of $estimate.",DEBUG);
 
 	for my $rec (@$recs) {
+        delete $$rec{checked};
+        delete $$rec{visible};
+        delete $$rec{excluded};
+        delete $$rec{deleted};
+        delete $$rec{total};
         $$rec{rel} = sprintf('%0.3f',$$rec{rel});
 
 		$client->respond( $rec );

-----------------------------------------------------------------------

Summary of changes:
 Open-ILS/examples/opensrf.xml.example              |   23 ++-
 .../lib/OpenILS/Application/Search/Biblio.pm       |  186 ++++++++----
 .../Application/Storage/Driver/Pg/QueryParser.pm   |  289 +-----------------
 .../Application/Storage/Publisher/metabib.pm       |  187 +++++++++++-
 Open-ILS/src/sql/Pg/002.schema.config.sql          |    2 +-
 Open-ILS/src/sql/Pg/040.schema.asset.sql           |    2 +-
 Open-ILS/src/sql/Pg/300.schema.staged_search.sql   |  327 ++++++++++++++++++++
 ..._fts.sql => 0786.schema.qp_fts_stored_proc.sql} |   18 +-
 8 files changed, 683 insertions(+), 351 deletions(-)
 copy Open-ILS/src/sql/Pg/upgrade/{0704.schema.query_parser_fts.sql => 0786.schema.qp_fts_stored_proc.sql} (96%)


hooks/post-receive
-- 
Evergreen ILS


More information about the open-ils-commits mailing list