[open-ils-commits] [GIT] Evergreen ILS branch master updated. c68550260497e050307b49b0743e339f96417b53

Evergreen Git git at git.evergreen-ils.org
Tue Sep 4 18:19:44 EDT 2018


This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "Evergreen ILS".

The branch, master has been updated
       via  c68550260497e050307b49b0743e339f96417b53 (commit)
       via  32f9b0303588a571a29efb000d49b132e8bc0f7a (commit)
       via  889898936925335c545091721b6db008148625b3 (commit)
       via  96bd60b86409c53da3238df335e1a9ad299cc29e (commit)
       via  5da0ea080077816154cb5d3d9d7bb32932e996dd (commit)
      from  1e6cbaca69712d708bf4e720e14ac78e1dc24ab6 (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.

- Log -----------------------------------------------------------------
commit c68550260497e050307b49b0743e339f96417b53
Author: Galen Charlton <gmc at equinoxinitiative.org>
Date:   Tue Sep 4 18:35:28 2018 -0400

    LP#1514085: stamp schema update
    
    Signed-off-by: Galen Charlton <gmc at equinoxinitiative.org>

diff --git a/Open-ILS/src/sql/Pg/002.schema.config.sql b/Open-ILS/src/sql/Pg/002.schema.config.sql
index c6aab8e..c7771c0 100644
--- a/Open-ILS/src/sql/Pg/002.schema.config.sql
+++ b/Open-ILS/src/sql/Pg/002.schema.config.sql
@@ -92,7 +92,7 @@ CREATE TRIGGER no_overlapping_deps
     BEFORE INSERT OR UPDATE ON config.db_patch_dependencies
     FOR EACH ROW EXECUTE PROCEDURE evergreen.array_overlap_check ('deprecates');
 
-INSERT INTO config.upgrade_log (version, applied_to) VALUES ('1125', :eg_version); -- khuckins/kmlussier/dbwells
+INSERT INTO config.upgrade_log (version, applied_to) VALUES ('1126', :eg_version); -- berick/gmcharlt
 
 CREATE TABLE config.bib_source (
 	id		SERIAL	PRIMARY KEY,
diff --git a/Open-ILS/src/sql/Pg/upgrade/XXXX.schema.vandelay-state-tracking.sql b/Open-ILS/src/sql/Pg/upgrade/1126.schema.vandelay-state-tracking.sql
similarity index 99%
rename from Open-ILS/src/sql/Pg/upgrade/XXXX.schema.vandelay-state-tracking.sql
rename to Open-ILS/src/sql/Pg/upgrade/1126.schema.vandelay-state-tracking.sql
index bce64d0..10e658f 100644
--- a/Open-ILS/src/sql/Pg/upgrade/XXXX.schema.vandelay-state-tracking.sql
+++ b/Open-ILS/src/sql/Pg/upgrade/1126.schema.vandelay-state-tracking.sql
@@ -1,7 +1,6 @@
-
 BEGIN;
 
--- SELECT evergreen.upgrade_deps_block_check('XXXX', :eg_version);
+SELECT evergreen.upgrade_deps_block_check('1126', :eg_version);
 
 CREATE TABLE vandelay.session_tracker (
     id          BIGSERIAL PRIMARY KEY,

commit 32f9b0303588a571a29efb000d49b132e8bc0f7a
Author: Galen Charlton <gmc at equinoxinitiative.org>
Date:   Tue Sep 4 18:33:09 2018 -0400

    LP#1514085: add to release notes with a server configuration note
    
    Signed-off-by: Galen Charlton <gmc at equinoxinitiative.org>

diff --git a/docs/RELEASE_NOTES_NEXT/Cataloging/async-vandelay.adoc b/docs/RELEASE_NOTES_NEXT/Cataloging/async-vandelay.adoc
index 81bf1a0..e857e13 100644
--- a/docs/RELEASE_NOTES_NEXT/Cataloging/async-vandelay.adoc
+++ b/docs/RELEASE_NOTES_NEXT/Cataloging/async-vandelay.adoc
@@ -17,3 +17,11 @@ session by the value stored in the "session_key" field.
 The table tracks other potentially useful data, like the staff member
 and workstation where the action was performed.
 
+Upgrade notes
++++++++++++++
+Users of NGINX as a reverse proxy may need to set a suitable
+`client_max_body_size` value in the NGINX configuration so that large
+MARC record uploads are not truncated. Note that this would have
+always been necessary, but since this feature allows larger files
+to be more reliably queued and imported, the need to set `client_max_body_size`
+became more apparent.

commit 889898936925335c545091721b6db008148625b3
Author: Galen Charlton <gmc at equinoxinitiative.org>
Date:   Tue Sep 4 17:46:49 2018 -0400

    LP#1514085: sync schema update script to reflect changes in master
    
    Signed-off-by: Galen Charlton <gmc at equinoxinitiative.org>

diff --git a/Open-ILS/src/sql/Pg/upgrade/XXXX.schema.vandelay-state-tracking.sql b/Open-ILS/src/sql/Pg/upgrade/XXXX.schema.vandelay-state-tracking.sql
index 8c2f765..bce64d0 100644
--- a/Open-ILS/src/sql/Pg/upgrade/XXXX.schema.vandelay-state-tracking.sql
+++ b/Open-ILS/src/sql/Pg/upgrade/XXXX.schema.vandelay-state-tracking.sql
@@ -367,6 +367,37 @@ BEGIN
         -- do nothing
     END;
 
+    -- propagate preferred name values from the source user to the
+    -- destination user, but only when values are not being replaced.
+    WITH susr AS (SELECT * FROM actor.usr WHERE id = src_usr)
+    UPDATE actor.usr SET 
+        pref_prefix = 
+            COALESCE(pref_prefix, (SELECT pref_prefix FROM susr)),
+        pref_first_given_name = 
+            COALESCE(pref_first_given_name, (SELECT pref_first_given_name FROM susr)),
+        pref_second_given_name = 
+            COALESCE(pref_second_given_name, (SELECT pref_second_given_name FROM susr)),
+        pref_family_name = 
+            COALESCE(pref_family_name, (SELECT pref_family_name FROM susr)),
+        pref_suffix = 
+            COALESCE(pref_suffix, (SELECT pref_suffix FROM susr))
+    WHERE id = dest_usr;
+
+    -- Copy and deduplicate name keywords
+    -- String -> array -> rows -> DISTINCT -> array -> string
+    WITH susr AS (SELECT * FROM actor.usr WHERE id = src_usr),
+         dusr AS (SELECT * FROM actor.usr WHERE id = dest_usr)
+    UPDATE actor.usr SET name_keywords = (
+        WITH keywords AS (
+            SELECT DISTINCT UNNEST(
+                REGEXP_SPLIT_TO_ARRAY(
+                    COALESCE((SELECT name_keywords FROM susr), '') || ' ' ||
+                    COALESCE((SELECT name_keywords FROM dusr), ''),  E'\\s+'
+                )
+            ) AS parts
+        ) SELECT ARRAY_TO_STRING(ARRAY_AGG(kw.parts), ' ') FROM keywords kw
+    ) WHERE id = dest_usr;
+
     -- Finally, delete the source user
     DELETE FROM actor.usr WHERE id = src_usr;
 

commit 96bd60b86409c53da3238df335e1a9ad299cc29e
Author: Bill Erickson <berickxx at gmail.com>
Date:   Thu Aug 23 14:17:11 2018 -0400

    LP#1514085 Release notes for vandelay session tracking
    
    Signed-off-by: Bill Erickson <berickxx at gmail.com>
    Signed-off-by: Galen Charlton <gmc at equinoxinitiative.org>

diff --git a/docs/RELEASE_NOTES_NEXT/Cataloging/async-vandelay.adoc b/docs/RELEASE_NOTES_NEXT/Cataloging/async-vandelay.adoc
new file mode 100644
index 0000000..81bf1a0
--- /dev/null
+++ b/docs/RELEASE_NOTES_NEXT/Cataloging/async-vandelay.adoc
@@ -0,0 +1,19 @@
+Asynchronous Vandelay Imports
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Vandelay imports are now monitored from the browser client asynchronously,
+meaning the client requests updates from the server instead of waiting for 
+the server to respond to the original import request.  This change allows 
+for incremental progress updates in the browser client.
+
+New Database Table
+++++++++++++++++++
+
+This adds a new database table vandelay.session_tracker for tracking
+in-progress vandelay upload activity.  A new tracker row is added for
+each of "upload", "enqueue", and "import" actions, linked for a given
+session by the value stored in the "session_key" field.
+
+The table tracks other potentially useful data, like the staff member
+and workstation where the action was performed.
+

commit 5da0ea080077816154cb5d3d9d7bb32932e996dd
Author: Bill Erickson <berickxx at gmail.com>
Date:   Thu Jun 28 12:38:57 2018 -0400

    LP#1514085 Vandelay in-database session tracking
    
    Adds a new DB table vandelay.session_tracker for monitoring progress on
    Vandelay enqueue and import sessions.
    
    Enqueue and import APIs get a new option to exit early, returning the
    newly created tracker object, so the caller can monitor the tracker
    instead of listening to streamed responses, which are not supported in
    browser client Dojo interfaces.
    
    Teach the existing Dojo Vandelay UI to exit early on enqueue & export and
    to poll for tracker data in lieu of waiting for streamed progress data.
    
    On user merge / purge, trackers are migrated to the destination user.
    
    Signed-off-by: Bill Erickson <berickxx at gmail.com>
    Signed-off-by: Galen Charlton <gmc at equinoxinitiative.org>

diff --git a/Open-ILS/examples/fm_IDL.xml b/Open-ILS/examples/fm_IDL.xml
index a921349..300c910 100644
--- a/Open-ILS/examples/fm_IDL.xml
+++ b/Open-ILS/examples/fm_IDL.xml
@@ -804,6 +804,50 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA
 		</permacrud>
 	</class>
 
+	<class id="vst" controller="open-ils.cstore open-ils.pcrud" 
+		oils_obj:fieldmapper="vandelay::session_tracker" 
+		oils_persist:tablename="vandelay.session_tracker" 
+		reporter:label="Vandelay Session Tracker">
+		<fields oils_persist:primary="id" oils_persist:sequence="vandelay.session_tracker_id_seq">
+			<field reporter:label="ID" name="id" reporter:datatype="id"/>
+			<field reporter:label="Session Key" name="session_key" reporter:datatype="text"/>
+			<field reporter:label="Name" name="name" reporter:datatype="text"/>
+			<field reporter:label="User" name="usr" reporter:datatype="link"/>
+			<field reporter:label="Workstation" name="workstation" reporter:datatype="link"/>
+			<field reporter:label="Record Type" name="record_type" reporter:datatype="text"/>
+			<!-- queue doesn't use datatype 'link' because it may refer to a bib
+					 queue or an auth queue and there's no IDL class for the
+					 vandelay.queue parent table.
+			-->
+			<field reporter:label="Source Queue" name="queue" reporter:datatype="int"/>
+			<field reporter:label="Create Time" name="create_time" reporter:datatype="timestamp"/>
+			<field reporter:label="Update Time" name="update_time" reporter:datatype="timestamp"/>
+			<field reporter:label="State" name="state" reporter:datatype="text"/>
+			<field reporter:label="Action Type" name="action_type" reporter:datatype="text"/>
+			<field reporter:label="Total Actions" name="total_actions" reporter:datatype="int"/>
+			<field reporter:label="Actions Performed" name="actions_performed" reporter:datatype="int"/>
+		</fields>
+		<links>
+			<link field="usr" reltype="has_a" key="id" map="" class="au"/>
+			<link field="workstation" reltype="has_a" key="id" map="" class="aws"/>
+		</links>
+		<permacrud xmlns="http://open-ils.org/spec/opensrf/IDL/permacrud/v1">
+			<actions>
+				<!-- created by the API -->
+				<retrieve global_required="true"
+					permission="CREATE_BIB_IMPORT_QUEUE CREATE_AUTHORITY_IMPORT_QUEUE"/>
+				<update global_required="true"
+					permission="UPDATE_BIB_IMPORT_QUEUE UPDATE_AUTHORITY_IMPORT_QUEUE">
+					<context link="workstation" field="owning_lib"/>
+				</update>
+				<delete 
+					permission="DELETE_BIB_IMPORT_QUEUE DELETE_AUTHORITY_IMPORT_QUEUE">
+					<context link="workstation" field="owning_lib"/>
+				</delete>
+			</actions>
+		</permacrud>
+	</class>
+
 	<class id="auoi" controller="open-ils.cstore" oils_obj:fieldmapper="actor::usr_org_unit_opt_in" oils_persist:tablename="actor.usr_org_unit_opt_in" reporter:label="User Sharing Opt-in">
 		<fields oils_persist:primary="id" oils_persist:sequence="actor.usr_org_unit_opt_in_id_seq">
 			<field reporter:label="Opt-in ID" name="id" reporter:datatype="id"/>
diff --git a/Open-ILS/src/perlmods/lib/OpenILS/Application/Vandelay.pm b/Open-ILS/src/perlmods/lib/OpenILS/Application/Vandelay.pm
index ba64b14..0647b33 100644
--- a/Open-ILS/src/perlmods/lib/OpenILS/Application/Vandelay.pm
+++ b/Open-ILS/src/perlmods/lib/OpenILS/Application/Vandelay.pm
@@ -17,6 +17,7 @@ use Time::HiRes qw(time);
 use OpenSRF::Utils::Logger qw/$logger/;
 use MIME::Base64;
 use XML::LibXML;
+use Digest::MD5 qw/md5_hex/;
 use OpenILS::Const qw/:const/;
 use OpenILS::Application::AppUtils;
 use OpenILS::Application::Cat::BibCommon;
@@ -238,6 +239,8 @@ sub process_spool {
     my $purpose = shift;
     my $filename = shift;
     my $bib_source = shift;
+    my $session_name = shift;
+    my $exit_early = shift;
 
     $client->max_chunk_count($self->{max_bundle_count}) if (!$client->can('max_bundle_count') && $self->{max_bundle_count});
 
@@ -272,6 +275,17 @@ sub process_spool {
 
     $logger->info("vandelay spooling $fingerprint purpose=$purpose file=$filename");
 
+    my ($tracker, $tevt) = create_session_tracker(
+        $e->requestor->id, $e->requestor->wsid, $fingerprint, 
+        $session_name, $type, $queue_id, 'enqueue');
+
+    if (!$tracker) {
+        $e->rollback;
+        return $tevt;
+    }
+
+    $client->respond_complete($tracker) if $exit_early;
+
     my $marctype = 'USMARC'; 
 
     open F, $filename;
@@ -309,11 +323,15 @@ sub process_spool {
                 next;
             }
 
-            if($self->api_name =~ /stream_results/ and $qrec) {
-                $client->respond($qrec->id)
-            } else {
-                $client->respond($count) if (++$count % $response_scale) == 0;
-                $response_scale *= 10 if ($count == ($response_scale * 10));
+            return $tevt if $tevt = increment_session_tracker($tracker);
+
+            if (!$exit_early) { # avoid unnecessary responses
+                if($self->api_name =~ /stream_results/ and $qrec) {
+                    $client->respond($qrec->id)
+                } else {
+                    $client->respond($count) if (++$count % $response_scale) == 0;
+                    $response_scale *= 10 if ($count == ($response_scale * 10));
+                }
             }
         } catch Error with {
             my $error = shift;
@@ -321,6 +339,9 @@ sub process_spool {
         }
     }
 
+    $tracker->state('complete');
+    $e->update_vandelay_session_tracker($tracker) or return $e->die_event;
+
     $e->commit;
     unlink($filename);
     $cache->delete_cache('vandelay_import_spool_' . $fingerprint) if $fingerprint;
@@ -883,6 +904,62 @@ sub queued_records_with_matches {
     return [ map {$_->{queued_record}} @$data ];
 }
 
+# Returns (tracker, err_event)
+# Runs within its own transaction.
+sub create_session_tracker {
+    my ($user_id, $ws_id, $key, $name, $type, $queue_id, $action, $total_acts) = @_;
+    my $e = new_editor(xact => 1);
+
+    if ($key) {
+        # if other trackers exist for this key, adopt the name
+        my $existing = 
+            $e->search_vandelay_session_tracker({session_key => $key})->[0];
+        $name = $existing->name if $name;
+
+    } else {
+        # anonymous tracker
+        $key = md5_hex(time."$$".rand());
+    }
+
+    my $tracker = Fieldmapper::vandelay::session_tracker->new;
+    $tracker->session_key($key);
+    $tracker->name($name || $key);
+    $tracker->usr($user_id);
+    $tracker->workstation($ws_id);
+    $tracker->record_type($type);
+    $tracker->queue($queue_id);
+    $tracker->action_type($action);
+    $tracker->total_actions($total_acts) if $total_acts;
+
+    # caller responsible for rolling back transaction
+    return (undef, $e->die_event) unless
+        $e->create_vandelay_session_tracker($tracker);
+
+    # Re-fetch to ensure we have all defaults applied for future updates.
+    return (undef, $e->die_event) unless 
+        $tracker = $e->retrieve_vandelay_session_tracker($tracker->id);
+
+    $e->commit;
+
+    return ($tracker);
+}
+
+# Increment the actions_performed count.
+# Must happen in its own transaction
+# Returns undef on success, Event on error
+sub increment_session_tracker {
+    my $tracker = shift;
+    my $amount = shift || 1;
+    my $e = new_editor(xact => 1);
+    $tracker->update_time('now');
+    $tracker->actions_performed($tracker->actions_performed + $amount);
+    return $e->die_event unless 
+        $e->update_vandelay_session_tracker($tracker);
+
+    $e->commit;
+    return undef;
+}
+
 
 # cache of import item org unit settings.  
 # used in apply_import_item_defaults() below, 
@@ -896,13 +973,15 @@ sub import_record_list_impl {
     my $type = $self->{record_type};
     my %queues;
     %item_defaults_cache = ();
+    my $exit_early = $args->{exit_early};
 
     my $report_args = {
         progress => 1,
         step => 1,
         conn => $conn,
         total => scalar(@$rec_ids),
-        report_all => $$args{report_all}
+        report_all => $$args{report_all},
+        exit_early => $exit_early
     };
 
     $conn->max_chunk_count(1) if (!$conn->can('max_bundle_size') && $conn->can('max_chunk_size') && $$args{report_all});
@@ -947,6 +1026,9 @@ sub import_record_list_impl {
         $rec_class = 'vqar';
     }
 
+
+    my $tracker;
+    my $tevt;
     my $new_rec_perm_cache;
     my @success_rec_ids;
     for my $rec_id (@$rec_ids) {
@@ -975,6 +1057,28 @@ sub import_record_list_impl {
             next;
         }
 
+        if (!$tracker) {
+            # Create the import tracker using the queue of the first
+            # retrieved record.  I'm fairly certain in practice all
+            # lists of records for import come from a single queue.
+            # We could get the queue from the previously created
+            # 'enqueue' tracker, but this is a safety valve to handle
+            # imports where no enqueue tracker exists, e.g. records
+            # enqueued pre-upgrade.
+            ($tracker, $tevt) = create_session_tracker(
+                $e->requestor->id, $e->requestor->wsid, $args->{session_key}, 
+                undef, $type, $rec->queue, 'import', scalar(@$rec_ids));
+
+            if (!$tracker) {
+                $e->rollback;
+                return $tevt;
+            }
+
+            $report_args->{tracker} = $tracker;
+
+            $conn->respond_complete($tracker) if $exit_early;
+        }
+
         if($rec->import_time) {
             # if the record is already imported, that means it may have 
             # un-imported copies.  Add to success list for later processing.
@@ -1219,8 +1323,17 @@ sub import_record_list_impl {
         $e->rollback;
     }
 
-    # import the copies
-    import_record_asset_list_impl($conn, \@success_rec_ids, $requestor, $args) if @success_rec_ids;
+    import_record_asset_list_impl($conn, 
+        \@success_rec_ids, $requestor, $args, $tracker) if @success_rec_ids;
+
+    if ($tracker) { # there are edge cases where it may not exist
+        my $e = new_editor(xact => 1);
+        $e->requestor($requestor);
+        $tracker->update_time('now');
+        $tracker->state('complete');
+        $e->update_vandelay_session_tracker($tracker) or return $e->die_event;
+        $e->commit;
+    }
 
     $conn->respond({total => $$report_args{total}, progress => $$report_args{progress}});
     return undef;
@@ -1337,10 +1450,20 @@ sub finish_rec_import_attempt {
     my $evt = $$args{evt};
     my $rec = $$args{rec};
     my $e = $$args{e};
+    my $tracker = $$args{tracker};
+    my $exit_early = $$args{exit_early};
 
     my $error = $$args{import_error};
     $error = 'general.unknown' if $evt and not $error;
 
+    # Note the tracker is updated regardless of whether the individual
+    # record import succeeded.  It's only a failed tracker if the
+    # entire process fails.
+    if ($tracker) { # tracker may be undef in rec-not-found situations
+        my $tevt = increment_session_tracker($tracker);
+        return $tevt if $tevt;
+    }
+
     # error tracking
     if($rec) {
 
@@ -1362,6 +1485,7 @@ sub finish_rec_import_attempt {
             $e->$method($rec) and $e->commit or $e->rollback;
 
         } else {
+
             # commit the successful import
             $e->commit;
         }
@@ -1371,17 +1495,19 @@ sub finish_rec_import_attempt {
         $e->rollback;
     }
         
-    # respond to client
-    if($$args{report_all} or ($$args{progress} % $$args{step}) == 0) {
-        $$args{conn}->respond({
-            total => $$args{total}, 
-            progress => $$args{progress}, 
-            imported => ($rec) ? $rec->id : undef,
-            import_error => $error,
-            no_import => $$args{no_import},
-            err_event => $evt
-        });
-        $$args{step} *= 2 unless $$args{step} == 256;
+    # respond to client unless we've already responded-complete
+    if (!$exit_early) {
+        if($$args{report_all} or ($$args{progress} % $$args{step}) == 0) {
+            $$args{conn}->respond({
+                total => $$args{total}, 
+                progress => $$args{progress}, 
+                imported => ($rec) ? $rec->id : undef,
+                import_error => $error,
+                no_import => $$args{no_import},
+                err_event => $evt
+            });
+            $$args{step} *= 2 unless $$args{step} == 256;
+        }
     }
 
     $$args{progress}++;
@@ -1590,7 +1716,7 @@ sub retrieve_queue_summary {
 # Given a list of queued record IDs, imports all items attached to those records
 # --------------------------------------------------------------------------------
 sub import_record_asset_list_impl {
-    my($conn, $rec_ids, $requestor, $args) = @_;
+    my($conn, $rec_ids, $requestor, $args, $tracker) = @_;
 
     my $roe = new_editor(xact=> 1, requestor => $requestor);
 
@@ -1615,6 +1741,14 @@ sub import_record_asset_list_impl {
         in_count => 0,
     };
 
+    if ($tracker && @$rec_ids) {
+        if (my $tevt = # assignment
+            increment_session_tracker($tracker, scalar(@$rec_ids))) {
+            $roe->rollback;
+            return $tevt;
+        }
+    }
+
     for my $rec_id (@$rec_ids) {
         my $rec = $roe->retrieve_vandelay_queued_bib_record($rec_id);
         my $item_ids = $roe->search_vandelay_import_item(
@@ -1622,6 +1756,13 @@ sub import_record_asset_list_impl {
             {idlist=>1}
         );
 
+        if ($tracker) { # increment per record
+            if (my $tevt = increment_session_tracker($tracker)) {
+                $roe->rollback;
+                return $tevt;
+            }
+        }
+
         # if any items have no call_number label and a value should be
         # applied automatically (via org settings), we want to use the same
         # call number label for every copy per org per record.
diff --git a/Open-ILS/src/sql/Pg/012.schema.vandelay.sql b/Open-ILS/src/sql/Pg/012.schema.vandelay.sql
index d60e01b..6f48276 100644
--- a/Open-ILS/src/sql/Pg/012.schema.vandelay.sql
+++ b/Open-ILS/src/sql/Pg/012.schema.vandelay.sql
@@ -2251,5 +2251,57 @@ $$ LANGUAGE PLPGSQL;
 --
 --INSERT INTO vandelay.authority_attr_definition ( code, description, xpath, ident ) VALUES ('rec_identifier','Identifier','//*[@tag="001"]', TRUE);
 
+
+CREATE TABLE vandelay.session_tracker (
+    id          BIGSERIAL PRIMARY KEY,
+
+    -- string of characters (e.g. md5) used for linking trackers
+    -- of different actions into a series.  There can be multiple
+    -- session_keys of each action type, creating the opportunity
+    -- to link multiple action trackers into a single session.
+    session_key TEXT NOT NULL,
+
+    -- optional user-supplied name
+    name        TEXT NOT NULL, 
+
+    usr         INTEGER NOT NULL REFERENCES actor.usr(id)
+                DEFERRABLE INITIALLY DEFERRED,
+
+    -- org unit can be derived from WS
+    workstation INTEGER NOT NULL REFERENCES actor.workstation(id)
+                ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
+
+    -- bib/auth
+    record_type vandelay.bib_queue_queue_type NOT NULL DEFAULT 'bib',
+
+    -- Queue defines the source of the data, it does not necessarily
+    -- mean that an action is being performed against an entire queue.
+    -- E.g. some imports are misc. lists of record IDs, but they always 
+    -- come from one queue.
+    -- No foreign key -- could be auth or bib queue.
+    queue       BIGINT NOT NULL,
+
+    create_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
+    update_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
+
+    state       TEXT NOT NULL DEFAULT 'active',
+
+    action_type TEXT NOT NULL DEFAULT 'enqueue', -- import
+
+    -- total number of tasks to perform / loosely defined
+    -- could be # of recs to import or # of recs + # of copies 
+    -- depending on the import context
+    total_actions INTEGER NOT NULL DEFAULT 0,
+
+    -- total number of tasks performed so far
+    actions_performed INTEGER NOT NULL DEFAULT 0,
+
+    CONSTRAINT vand_tracker_valid_state 
+        CHECK (state IN ('active','error','complete')),
+
+    CONSTRAINT vand_tracker_valid_action_type
+        CHECK (action_type IN ('upload', 'enqueue', 'import'))
+);
+
 COMMIT;
 
diff --git a/Open-ILS/src/sql/Pg/999.functions.global.sql b/Open-ILS/src/sql/Pg/999.functions.global.sql
index da91959..65dcdae 100644
--- a/Open-ILS/src/sql/Pg/999.functions.global.sql
+++ b/Open-ILS/src/sql/Pg/999.functions.global.sql
@@ -195,6 +195,8 @@ BEGIN
 		END LOOP;
 	END LOOP;
 
+    UPDATE vandelay.session_tracker SET usr = dest_usr WHERE usr = src_usr;
+
     -- money.*
     PERFORM actor.usr_merge_rows('money.collections_tracker', 'usr', src_usr, dest_usr);
     PERFORM actor.usr_merge_rows('money.collections_tracker', 'collector', src_usr, dest_usr);
@@ -712,6 +714,8 @@ BEGIN
 		END LOOP;
 	END LOOP;
 
+    UPDATE vandelay.session_tracker SET usr = dest_usr WHERE usr = src_usr;
+
     -- NULL-ify addresses last so other cleanup (e.g. circ anonymization)
     -- can access the information before deletion.
 	UPDATE actor.usr SET
diff --git a/Open-ILS/src/sql/Pg/upgrade/XXXX.schema.vandelay-state-tracking.sql b/Open-ILS/src/sql/Pg/upgrade/XXXX.schema.vandelay-state-tracking.sql
new file mode 100644
index 0000000..8c2f765
--- /dev/null
+++ b/Open-ILS/src/sql/Pg/upgrade/XXXX.schema.vandelay-state-tracking.sql
@@ -0,0 +1,707 @@
+
+BEGIN;
+
+-- SELECT evergreen.upgrade_deps_block_check('XXXX', :eg_version);
+
+CREATE TABLE vandelay.session_tracker (
+    id          BIGSERIAL PRIMARY KEY,
+
+    -- string of characters (e.g. md5) used for linking trackers
+    -- of different actions into a series.  There can be multiple
+    -- session_keys of each action type, creating the opportunity
+    -- to link multiple action trackers into a single session.
+    session_key TEXT NOT NULL,
+
+    -- optional user-supplied name
+    name        TEXT NOT NULL, 
+
+    usr         INTEGER NOT NULL REFERENCES actor.usr(id)
+                DEFERRABLE INITIALLY DEFERRED,
+
+    -- org unit can be derived from WS
+    workstation INTEGER NOT NULL REFERENCES actor.workstation(id)
+                ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
+
+    -- bib/auth
+    record_type vandelay.bib_queue_queue_type NOT NULL DEFAULT 'bib',
+
+    -- Queue defines the source of the data, it does not necessarily
+    -- mean that an action is being performed against an entire queue.
+    -- E.g. some imports are misc. lists of record IDs, but they always 
+    -- come from one queue.
+    -- No foreign key -- could be auth or bib queue.
+    queue       BIGINT NOT NULL,
+
+    create_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
+    update_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
+
+    state       TEXT NOT NULL DEFAULT 'active',
+
+    action_type TEXT NOT NULL DEFAULT 'enqueue', -- import
+
+    -- total number of tasks to perform / loosely defined
+    -- could be # of recs to import or # of recs + # of copies 
+    -- depending on the import context
+    total_actions INTEGER NOT NULL DEFAULT 0,
+
+    -- total number of tasks performed so far
+    actions_performed INTEGER NOT NULL DEFAULT 0,
+
+    CONSTRAINT vand_tracker_valid_state 
+        CHECK (state IN ('active','error','complete')),
+
+    CONSTRAINT vand_tracker_valid_action_type
+        CHECK (action_type IN ('upload', 'enqueue', 'import'))
+);
+
+
+CREATE OR REPLACE FUNCTION actor.usr_merge( src_usr INT, dest_usr INT, del_addrs BOOLEAN, del_cards BOOLEAN, deactivate_cards BOOLEAN ) RETURNS VOID AS $$
+DECLARE
+	suffix TEXT;
+	bucket_row RECORD;
+	picklist_row RECORD;
+	queue_row RECORD;
+	folder_row RECORD;
+BEGIN
+
+    -- do some initial cleanup 
+    UPDATE actor.usr SET card = NULL WHERE id = src_usr;
+    UPDATE actor.usr SET mailing_address = NULL WHERE id = src_usr;
+    UPDATE actor.usr SET billing_address = NULL WHERE id = src_usr;
+
+    -- actor.*
+    IF del_cards THEN
+        DELETE FROM actor.card where usr = src_usr;
+    ELSE
+        IF deactivate_cards THEN
+            UPDATE actor.card SET active = 'f' WHERE usr = src_usr;
+        END IF;
+        UPDATE actor.card SET usr = dest_usr WHERE usr = src_usr;
+    END IF;
+
+
+    IF del_addrs THEN
+        DELETE FROM actor.usr_address WHERE usr = src_usr;
+    ELSE
+        UPDATE actor.usr_address SET usr = dest_usr WHERE usr = src_usr;
+    END IF;
+
+    UPDATE actor.usr_note SET usr = dest_usr WHERE usr = src_usr;
+    -- dupes are technically OK in actor.usr_standing_penalty, should manually delete them...
+    UPDATE actor.usr_standing_penalty SET usr = dest_usr WHERE usr = src_usr;
+    PERFORM actor.usr_merge_rows('actor.usr_org_unit_opt_in', 'usr', src_usr, dest_usr);
+    PERFORM actor.usr_merge_rows('actor.usr_setting', 'usr', src_usr, dest_usr);
+
+    -- permission.*
+    PERFORM actor.usr_merge_rows('permission.usr_perm_map', 'usr', src_usr, dest_usr);
+    PERFORM actor.usr_merge_rows('permission.usr_object_perm_map', 'usr', src_usr, dest_usr);
+    PERFORM actor.usr_merge_rows('permission.usr_grp_map', 'usr', src_usr, dest_usr);
+    PERFORM actor.usr_merge_rows('permission.usr_work_ou_map', 'usr', src_usr, dest_usr);
+
+
+    -- container.*
+	
+	-- For each *_bucket table: transfer every bucket belonging to src_usr
+	-- into the custody of dest_usr.
+	--
+	-- In order to avoid colliding with an existing bucket owned by
+	-- the destination user, append the source user's id (in parentheses)
+	-- to the name.  If you still get a collision, add successive
+	-- spaces to the name and keep trying until you succeed.
+	--
+	FOR bucket_row in
+		SELECT id, name
+		FROM   container.biblio_record_entry_bucket
+		WHERE  owner = src_usr
+	LOOP
+		suffix := ' (' || src_usr || ')';
+		LOOP
+			BEGIN
+				UPDATE  container.biblio_record_entry_bucket
+				SET     owner = dest_usr, name = name || suffix
+				WHERE   id = bucket_row.id;
+			EXCEPTION WHEN unique_violation THEN
+				suffix := suffix || ' ';
+				CONTINUE;
+			END;
+			EXIT;
+		END LOOP;
+	END LOOP;
+
+	FOR bucket_row in
+		SELECT id, name
+		FROM   container.call_number_bucket
+		WHERE  owner = src_usr
+	LOOP
+		suffix := ' (' || src_usr || ')';
+		LOOP
+			BEGIN
+				UPDATE  container.call_number_bucket
+				SET     owner = dest_usr, name = name || suffix
+				WHERE   id = bucket_row.id;
+			EXCEPTION WHEN unique_violation THEN
+				suffix := suffix || ' ';
+				CONTINUE;
+			END;
+			EXIT;
+		END LOOP;
+	END LOOP;
+
+	FOR bucket_row in
+		SELECT id, name
+		FROM   container.copy_bucket
+		WHERE  owner = src_usr
+	LOOP
+		suffix := ' (' || src_usr || ')';
+		LOOP
+			BEGIN
+				UPDATE  container.copy_bucket
+				SET     owner = dest_usr, name = name || suffix
+				WHERE   id = bucket_row.id;
+			EXCEPTION WHEN unique_violation THEN
+				suffix := suffix || ' ';
+				CONTINUE;
+			END;
+			EXIT;
+		END LOOP;
+	END LOOP;
+
+	FOR bucket_row in
+		SELECT id, name
+		FROM   container.user_bucket
+		WHERE  owner = src_usr
+	LOOP
+		suffix := ' (' || src_usr || ')';
+		LOOP
+			BEGIN
+				UPDATE  container.user_bucket
+				SET     owner = dest_usr, name = name || suffix
+				WHERE   id = bucket_row.id;
+			EXCEPTION WHEN unique_violation THEN
+				suffix := suffix || ' ';
+				CONTINUE;
+			END;
+			EXIT;
+		END LOOP;
+	END LOOP;
+
+	UPDATE container.user_bucket_item SET target_user = dest_usr WHERE target_user = src_usr;
+
+    -- vandelay.*
+	-- transfer queues the same way we transfer buckets (see above)
+	FOR queue_row in
+		SELECT id, name
+		FROM   vandelay.queue
+		WHERE  owner = src_usr
+	LOOP
+		suffix := ' (' || src_usr || ')';
+		LOOP
+			BEGIN
+				UPDATE  vandelay.queue
+				SET     owner = dest_usr, name = name || suffix
+				WHERE   id = queue_row.id;
+			EXCEPTION WHEN unique_violation THEN
+				suffix := suffix || ' ';
+				CONTINUE;
+			END;
+			EXIT;
+		END LOOP;
+	END LOOP;
+
+    UPDATE vandelay.session_tracker SET usr = dest_usr WHERE usr = src_usr;
+
+    -- money.*
+    PERFORM actor.usr_merge_rows('money.collections_tracker', 'usr', src_usr, dest_usr);
+    PERFORM actor.usr_merge_rows('money.collections_tracker', 'collector', src_usr, dest_usr);
+    UPDATE money.billable_xact SET usr = dest_usr WHERE usr = src_usr;
+    UPDATE money.billing SET voider = dest_usr WHERE voider = src_usr;
+    UPDATE money.bnm_payment SET accepting_usr = dest_usr WHERE accepting_usr = src_usr;
+
+    -- action.*
+    UPDATE action.circulation SET usr = dest_usr WHERE usr = src_usr;
+    UPDATE action.circulation SET circ_staff = dest_usr WHERE circ_staff = src_usr;
+    UPDATE action.circulation SET checkin_staff = dest_usr WHERE checkin_staff = src_usr;
+    UPDATE action.usr_circ_history SET usr = dest_usr WHERE usr = src_usr;
+
+    UPDATE action.hold_request SET usr = dest_usr WHERE usr = src_usr;
+    UPDATE action.hold_request SET fulfillment_staff = dest_usr WHERE fulfillment_staff = src_usr;
+    UPDATE action.hold_request SET requestor = dest_usr WHERE requestor = src_usr;
+    UPDATE action.hold_notification SET notify_staff = dest_usr WHERE notify_staff = src_usr;
+
+    UPDATE action.in_house_use SET staff = dest_usr WHERE staff = src_usr;
+    UPDATE action.non_cataloged_circulation SET staff = dest_usr WHERE staff = src_usr;
+    UPDATE action.non_cataloged_circulation SET patron = dest_usr WHERE patron = src_usr;
+    UPDATE action.non_cat_in_house_use SET staff = dest_usr WHERE staff = src_usr;
+    UPDATE action.survey_response SET usr = dest_usr WHERE usr = src_usr;
+
+    -- acq.*
+    UPDATE acq.fund_allocation SET allocator = dest_usr WHERE allocator = src_usr;
+	UPDATE acq.fund_transfer SET transfer_user = dest_usr WHERE transfer_user = src_usr;
+
+	-- transfer picklists the same way we transfer buckets (see above)
+	FOR picklist_row in
+		SELECT id, name
+		FROM   acq.picklist
+		WHERE  owner = src_usr
+	LOOP
+		suffix := ' (' || src_usr || ')';
+		LOOP
+			BEGIN
+				UPDATE  acq.picklist
+				SET     owner = dest_usr, name = name || suffix
+				WHERE   id = picklist_row.id;
+			EXCEPTION WHEN unique_violation THEN
+				suffix := suffix || ' ';
+				CONTINUE;
+			END;
+			EXIT;
+		END LOOP;
+	END LOOP;
+
+    UPDATE acq.purchase_order SET owner = dest_usr WHERE owner = src_usr;
+    UPDATE acq.po_note SET creator = dest_usr WHERE creator = src_usr;
+    UPDATE acq.po_note SET editor = dest_usr WHERE editor = src_usr;
+    UPDATE acq.provider_note SET creator = dest_usr WHERE creator = src_usr;
+    UPDATE acq.provider_note SET editor = dest_usr WHERE editor = src_usr;
+    UPDATE acq.lineitem_note SET creator = dest_usr WHERE creator = src_usr;
+    UPDATE acq.lineitem_note SET editor = dest_usr WHERE editor = src_usr;
+    UPDATE acq.lineitem_usr_attr_definition SET usr = dest_usr WHERE usr = src_usr;
+
+    -- asset.*
+    UPDATE asset.copy SET creator = dest_usr WHERE creator = src_usr;
+    UPDATE asset.copy SET editor = dest_usr WHERE editor = src_usr;
+    UPDATE asset.copy_note SET creator = dest_usr WHERE creator = src_usr;
+    UPDATE asset.call_number SET creator = dest_usr WHERE creator = src_usr;
+    UPDATE asset.call_number SET editor = dest_usr WHERE editor = src_usr;
+    UPDATE asset.call_number_note SET creator = dest_usr WHERE creator = src_usr;
+
+    -- serial.*
+    UPDATE serial.record_entry SET creator = dest_usr WHERE creator = src_usr;
+    UPDATE serial.record_entry SET editor = dest_usr WHERE editor = src_usr;
+
+    -- reporter.*
+    -- It's not uncommon to define the reporter schema in a replica 
+    -- DB only, so don't assume these tables exist in the write DB.
+    BEGIN
+    	UPDATE reporter.template SET owner = dest_usr WHERE owner = src_usr;
+    EXCEPTION WHEN undefined_table THEN
+        -- do nothing
+    END;
+    BEGIN
+    	UPDATE reporter.report SET owner = dest_usr WHERE owner = src_usr;
+    EXCEPTION WHEN undefined_table THEN
+        -- do nothing
+    END;
+    BEGIN
+    	UPDATE reporter.schedule SET runner = dest_usr WHERE runner = src_usr;
+    EXCEPTION WHEN undefined_table THEN
+        -- do nothing
+    END;
+    BEGIN
+		-- transfer folders the same way we transfer buckets (see above)
+		FOR folder_row in
+			SELECT id, name
+			FROM   reporter.template_folder
+			WHERE  owner = src_usr
+		LOOP
+			suffix := ' (' || src_usr || ')';
+			LOOP
+				BEGIN
+					UPDATE  reporter.template_folder
+					SET     owner = dest_usr, name = name || suffix
+					WHERE   id = folder_row.id;
+				EXCEPTION WHEN unique_violation THEN
+					suffix := suffix || ' ';
+					CONTINUE;
+				END;
+				EXIT;
+			END LOOP;
+		END LOOP;
+    EXCEPTION WHEN undefined_table THEN
+        -- do nothing
+    END;
+    BEGIN
+		-- transfer folders the same way we transfer buckets (see above)
+		FOR folder_row in
+			SELECT id, name
+			FROM   reporter.report_folder
+			WHERE  owner = src_usr
+		LOOP
+			suffix := ' (' || src_usr || ')';
+			LOOP
+				BEGIN
+					UPDATE  reporter.report_folder
+					SET     owner = dest_usr, name = name || suffix
+					WHERE   id = folder_row.id;
+				EXCEPTION WHEN unique_violation THEN
+					suffix := suffix || ' ';
+					CONTINUE;
+				END;
+				EXIT;
+			END LOOP;
+		END LOOP;
+    EXCEPTION WHEN undefined_table THEN
+        -- do nothing
+    END;
+    BEGIN
+		-- transfer folders the same way we transfer buckets (see above)
+		FOR folder_row in
+			SELECT id, name
+			FROM   reporter.output_folder
+			WHERE  owner = src_usr
+		LOOP
+			suffix := ' (' || src_usr || ')';
+			LOOP
+				BEGIN
+					UPDATE  reporter.output_folder
+					SET     owner = dest_usr, name = name || suffix
+					WHERE   id = folder_row.id;
+				EXCEPTION WHEN unique_violation THEN
+					suffix := suffix || ' ';
+					CONTINUE;
+				END;
+				EXIT;
+			END LOOP;
+		END LOOP;
+    EXCEPTION WHEN undefined_table THEN
+        -- do nothing
+    END;
+
+    -- Finally, delete the source user
+    DELETE FROM actor.usr WHERE id = src_usr;
+
+END;
+$$ LANGUAGE plpgsql;
+
+
+CREATE OR REPLACE FUNCTION actor.usr_purge_data(
+	src_usr  IN INTEGER,
+	specified_dest_usr IN INTEGER
+) RETURNS VOID AS $$
+-- Purge a user's data.  Rows that embody the user's own activity
+-- (circulations, holds, bucket items, settings, penalties, ...) are
+-- deleted outright; rows that only reference the user in a staff-like
+-- role (creator/editor/collector/staff/...) are reassigned to the
+-- destination user.  The actor.usr row itself is kept, but is
+-- deactivated and unlinked from its card and addresses at the end.
+--
+-- src_usr            : id of the user being purged
+-- specified_dest_usr : user to receive reassigned rows; NULL means
+--                      user 1 (the admin user on stock installs)
+DECLARE
+	suffix TEXT;          -- uniquifier appended to renamed containers
+	renamable_row RECORD; -- bucket/folder/picklist/queue being renamed
+	dest_usr INTEGER;     -- effective destination user
+BEGIN
+
+	IF specified_dest_usr IS NULL THEN
+		dest_usr := 1; -- Admin user on stock installs
+	ELSE
+		dest_usr := specified_dest_usr;
+	END IF;
+
+	-- acq.*
+	UPDATE acq.fund_allocation SET allocator = dest_usr WHERE allocator = src_usr;
+	UPDATE acq.lineitem SET creator = dest_usr WHERE creator = src_usr;
+	UPDATE acq.lineitem SET editor = dest_usr WHERE editor = src_usr;
+	UPDATE acq.lineitem SET selector = dest_usr WHERE selector = src_usr;
+	UPDATE acq.lineitem_note SET creator = dest_usr WHERE creator = src_usr;
+	UPDATE acq.lineitem_note SET editor = dest_usr WHERE editor = src_usr;
+	DELETE FROM acq.lineitem_usr_attr_definition WHERE usr = src_usr;
+
+	-- Update with a rename to avoid collisions
+	-- (append " (<src_usr>)" to the name; on unique_violation keep
+	-- padding the suffix with spaces until the rename succeeds)
+	FOR renamable_row in
+		SELECT id, name
+		FROM   acq.picklist
+		WHERE  owner = src_usr
+	LOOP
+		suffix := ' (' || src_usr || ')';
+		LOOP
+			BEGIN
+				UPDATE  acq.picklist
+				SET     owner = dest_usr, name = name || suffix
+				WHERE   id = renamable_row.id;
+			EXCEPTION WHEN unique_violation THEN
+				suffix := suffix || ' ';
+				CONTINUE;
+			END;
+			EXIT;
+		END LOOP;
+	END LOOP;
+
+	UPDATE acq.picklist SET creator = dest_usr WHERE creator = src_usr;
+	UPDATE acq.picklist SET editor = dest_usr WHERE editor = src_usr;
+	UPDATE acq.po_note SET creator = dest_usr WHERE creator = src_usr;
+	UPDATE acq.po_note SET editor = dest_usr WHERE editor = src_usr;
+	UPDATE acq.purchase_order SET owner = dest_usr WHERE owner = src_usr;
+	UPDATE acq.purchase_order SET creator = dest_usr WHERE creator = src_usr;
+	UPDATE acq.purchase_order SET editor = dest_usr WHERE editor = src_usr;
+	UPDATE acq.claim_event SET creator = dest_usr WHERE creator = src_usr;
+
+	-- action.*
+	-- patron-side rows (the user's own circs, holds, responses) are
+	-- deleted; staff-side references on other rows are reassigned
+	DELETE FROM action.circulation WHERE usr = src_usr;
+	UPDATE action.circulation SET circ_staff = dest_usr WHERE circ_staff = src_usr;
+	UPDATE action.circulation SET checkin_staff = dest_usr WHERE checkin_staff = src_usr;
+	UPDATE action.hold_notification SET notify_staff = dest_usr WHERE notify_staff = src_usr;
+	UPDATE action.hold_request SET fulfillment_staff = dest_usr WHERE fulfillment_staff = src_usr;
+	UPDATE action.hold_request SET requestor = dest_usr WHERE requestor = src_usr;
+	DELETE FROM action.hold_request WHERE usr = src_usr;
+	UPDATE action.in_house_use SET staff = dest_usr WHERE staff = src_usr;
+	UPDATE action.non_cat_in_house_use SET staff = dest_usr WHERE staff = src_usr;
+	DELETE FROM action.non_cataloged_circulation WHERE patron = src_usr;
+	UPDATE action.non_cataloged_circulation SET staff = dest_usr WHERE staff = src_usr;
+	DELETE FROM action.survey_response WHERE usr = src_usr;
+	UPDATE action.fieldset SET owner = dest_usr WHERE owner = src_usr;
+	DELETE FROM action.usr_circ_history WHERE usr = src_usr;
+
+	-- actor.*
+	DELETE FROM actor.card WHERE usr = src_usr;
+	DELETE FROM actor.stat_cat_entry_usr_map WHERE target_usr = src_usr;
+
+	-- The following update is intended to avoid transient violations of a foreign
+	-- key constraint, whereby actor.usr_address references itself.  It may not be
+	-- necessary, but it does no harm.
+	UPDATE actor.usr_address SET replaces = NULL
+		WHERE usr = src_usr AND replaces IS NOT NULL;
+	DELETE FROM actor.usr_address WHERE usr = src_usr;
+	DELETE FROM actor.usr_note WHERE usr = src_usr;
+	UPDATE actor.usr_note SET creator = dest_usr WHERE creator = src_usr;
+	DELETE FROM actor.usr_org_unit_opt_in WHERE usr = src_usr;
+	UPDATE actor.usr_org_unit_opt_in SET staff = dest_usr WHERE staff = src_usr;
+	DELETE FROM actor.usr_setting WHERE usr = src_usr;
+	DELETE FROM actor.usr_standing_penalty WHERE usr = src_usr;
+	UPDATE actor.usr_standing_penalty SET staff = dest_usr WHERE staff = src_usr;
+
+	-- asset.*
+	UPDATE asset.call_number SET creator = dest_usr WHERE creator = src_usr;
+	UPDATE asset.call_number SET editor = dest_usr WHERE editor = src_usr;
+	UPDATE asset.call_number_note SET creator = dest_usr WHERE creator = src_usr;
+	UPDATE asset.copy SET creator = dest_usr WHERE creator = src_usr;
+	UPDATE asset.copy SET editor = dest_usr WHERE editor = src_usr;
+	UPDATE asset.copy_note SET creator = dest_usr WHERE creator = src_usr;
+
+	-- auditor.*
+	DELETE FROM auditor.actor_usr_address_history WHERE id = src_usr;
+	DELETE FROM auditor.actor_usr_history WHERE id = src_usr;
+	UPDATE auditor.asset_call_number_history SET creator = dest_usr WHERE creator = src_usr;
+	UPDATE auditor.asset_call_number_history SET editor  = dest_usr WHERE editor  = src_usr;
+	UPDATE auditor.asset_copy_history SET creator = dest_usr WHERE creator = src_usr;
+	UPDATE auditor.asset_copy_history SET editor  = dest_usr WHERE editor  = src_usr;
+	UPDATE auditor.biblio_record_entry_history SET creator = dest_usr WHERE creator = src_usr;
+	UPDATE auditor.biblio_record_entry_history SET editor  = dest_usr WHERE editor  = src_usr;
+
+	-- biblio.*
+	UPDATE biblio.record_entry SET creator = dest_usr WHERE creator = src_usr;
+	UPDATE biblio.record_entry SET editor = dest_usr WHERE editor = src_usr;
+	UPDATE biblio.record_note SET creator = dest_usr WHERE creator = src_usr;
+	UPDATE biblio.record_note SET editor = dest_usr WHERE editor = src_usr;
+
+	-- container.*
+	-- Update buckets with a rename to avoid collisions
+	FOR renamable_row in
+		SELECT id, name
+		FROM   container.biblio_record_entry_bucket
+		WHERE  owner = src_usr
+	LOOP
+		suffix := ' (' || src_usr || ')';
+		LOOP
+			BEGIN
+				UPDATE  container.biblio_record_entry_bucket
+				SET     owner = dest_usr, name = name || suffix
+				WHERE   id = renamable_row.id;
+			EXCEPTION WHEN unique_violation THEN
+				suffix := suffix || ' ';
+				CONTINUE;
+			END;
+			EXIT;
+		END LOOP;
+	END LOOP;
+
+	FOR renamable_row in
+		SELECT id, name
+		FROM   container.call_number_bucket
+		WHERE  owner = src_usr
+	LOOP
+		suffix := ' (' || src_usr || ')';
+		LOOP
+			BEGIN
+				UPDATE  container.call_number_bucket
+				SET     owner = dest_usr, name = name || suffix
+				WHERE   id = renamable_row.id;
+			EXCEPTION WHEN unique_violation THEN
+				suffix := suffix || ' ';
+				CONTINUE;
+			END;
+			EXIT;
+		END LOOP;
+	END LOOP;
+
+	FOR renamable_row in
+		SELECT id, name
+		FROM   container.copy_bucket
+		WHERE  owner = src_usr
+	LOOP
+		suffix := ' (' || src_usr || ')';
+		LOOP
+			BEGIN
+				UPDATE  container.copy_bucket
+				SET     owner = dest_usr, name = name || suffix
+				WHERE   id = renamable_row.id;
+			EXCEPTION WHEN unique_violation THEN
+				suffix := suffix || ' ';
+				CONTINUE;
+			END;
+			EXIT;
+		END LOOP;
+	END LOOP;
+
+	FOR renamable_row in
+		SELECT id, name
+		FROM   container.user_bucket
+		WHERE  owner = src_usr
+	LOOP
+		suffix := ' (' || src_usr || ')';
+		LOOP
+			BEGIN
+				UPDATE  container.user_bucket
+				SET     owner = dest_usr, name = name || suffix
+				WHERE   id = renamable_row.id;
+			EXCEPTION WHEN unique_violation THEN
+				suffix := suffix || ' ';
+				CONTINUE;
+			END;
+			EXIT;
+		END LOOP;
+	END LOOP;
+
+	DELETE FROM container.user_bucket_item WHERE target_user = src_usr;
+
+	-- money.*
+	DELETE FROM money.billable_xact WHERE usr = src_usr;
+	DELETE FROM money.collections_tracker WHERE usr = src_usr;
+	UPDATE money.collections_tracker SET collector = dest_usr WHERE collector = src_usr;
+
+	-- permission.*
+	DELETE FROM permission.usr_grp_map WHERE usr = src_usr;
+	DELETE FROM permission.usr_object_perm_map WHERE usr = src_usr;
+	DELETE FROM permission.usr_perm_map WHERE usr = src_usr;
+	DELETE FROM permission.usr_work_ou_map WHERE usr = src_usr;
+
+	-- reporter.*
+	-- The reporter schema is sometimes defined only on a replica DB,
+	-- so tolerate its tables being absent (undefined_table).
+	-- Update with a rename to avoid collisions
+	BEGIN
+		FOR renamable_row in
+			SELECT id, name
+			FROM   reporter.output_folder
+			WHERE  owner = src_usr
+		LOOP
+			suffix := ' (' || src_usr || ')';
+			LOOP
+				BEGIN
+					UPDATE  reporter.output_folder
+					SET     owner = dest_usr, name = name || suffix
+					WHERE   id = renamable_row.id;
+				EXCEPTION WHEN unique_violation THEN
+					suffix := suffix || ' ';
+					CONTINUE;
+				END;
+				EXIT;
+			END LOOP;
+		END LOOP;
+	EXCEPTION WHEN undefined_table THEN
+		-- do nothing
+	END;
+
+	BEGIN
+		UPDATE reporter.report SET owner = dest_usr WHERE owner = src_usr;
+	EXCEPTION WHEN undefined_table THEN
+		-- do nothing
+	END;
+
+	-- Update with a rename to avoid collisions
+	BEGIN
+		FOR renamable_row in
+			SELECT id, name
+			FROM   reporter.report_folder
+			WHERE  owner = src_usr
+		LOOP
+			suffix := ' (' || src_usr || ')';
+			LOOP
+				BEGIN
+					UPDATE  reporter.report_folder
+					SET     owner = dest_usr, name = name || suffix
+					WHERE   id = renamable_row.id;
+				EXCEPTION WHEN unique_violation THEN
+					suffix := suffix || ' ';
+					CONTINUE;
+				END;
+				EXIT;
+			END LOOP;
+		END LOOP;
+	EXCEPTION WHEN undefined_table THEN
+		-- do nothing
+	END;
+
+	BEGIN
+		UPDATE reporter.schedule SET runner = dest_usr WHERE runner = src_usr;
+	EXCEPTION WHEN undefined_table THEN
+		-- do nothing
+	END;
+
+	BEGIN
+		UPDATE reporter.template SET owner = dest_usr WHERE owner = src_usr;
+	EXCEPTION WHEN undefined_table THEN
+		-- do nothing
+	END;
+
+	-- Update with a rename to avoid collisions
+	BEGIN
+		FOR renamable_row in
+			SELECT id, name
+			FROM   reporter.template_folder
+			WHERE  owner = src_usr
+		LOOP
+			suffix := ' (' || src_usr || ')';
+			LOOP
+				BEGIN
+					UPDATE  reporter.template_folder
+					SET     owner = dest_usr, name = name || suffix
+					WHERE   id = renamable_row.id;
+				EXCEPTION WHEN unique_violation THEN
+					suffix := suffix || ' ';
+					CONTINUE;
+				END;
+				EXIT;
+			END LOOP;
+		END LOOP;
+	EXCEPTION WHEN undefined_table THEN
+	-- do nothing
+	END;
+
+	-- vandelay.*
+	-- Update with a rename to avoid collisions
+	FOR renamable_row in
+		SELECT id, name
+		FROM   vandelay.queue
+		WHERE  owner = src_usr
+	LOOP
+		suffix := ' (' || src_usr || ')';
+		LOOP
+			BEGIN
+				UPDATE  vandelay.queue
+				SET     owner = dest_usr, name = name || suffix
+				WHERE   id = renamable_row.id;
+			EXCEPTION WHEN unique_violation THEN
+				suffix := suffix || ' ';
+				CONTINUE;
+			END;
+			EXIT;
+		END LOOP;
+	END LOOP;
+
+    UPDATE vandelay.session_tracker SET usr = dest_usr WHERE usr = src_usr;
+
+    -- NULL-ify addresses last so other cleanup (e.g. circ anonymization)
+    -- can access the information before deletion.
+	UPDATE actor.usr SET
+		active = FALSE,
+		card = NULL,
+		mailing_address = NULL,
+		billing_address = NULL
+	WHERE id = src_usr;
+
+END;
+$$ LANGUAGE plpgsql;
+
+
+COMMIT;
diff --git a/Open-ILS/web/js/ui/default/vandelay/vandelay.js b/Open-ILS/web/js/ui/default/vandelay/vandelay.js
index b60cab0..ab03a20 100644
--- a/Open-ILS/web/js/ui/default/vandelay/vandelay.js
+++ b/Open-ILS/web/js/ui/default/vandelay/vandelay.js
@@ -467,21 +467,62 @@ function createQueue(
   * out into the vandelay tables
   */
 function processSpool(key, queueId, type, onload) {
+
     fieldmapper.standardRequest(
         ['open-ils.vandelay', 'open-ils.vandelay.'+type+'.process_spool'],
         {   async: true,
-            params: [authtoken, key, queueId],
-            onresponse : function(r) {
-                var resp = r.recv().content();
-                if(e = openils.Event.parse(resp)) 
+            params: [authtoken, key, queueId, null, null, null, null, 1],
+
+            // exit_early mode returns the tracker
+            // watch it until it completes.
+            oncomplete: function(r) {
+                var tracker = r.recv().content();
+                if(e = openils.Event.parse(tracker)) 
                     return alert(e);
-                dojo.byId('vl-upload-status-count').innerHTML = resp;
-            },
-            oncomplete : function(r) {onload();}
+
+                // Then poll for tracker updates.
+                pollSessionTracker(tracker.id(),
+                    function(tracker) {
+                        dojo.byId('vl-upload-status-count').innerHTML = 
+                            tracker.actions_performed();
+                    },
+                    function() { onload(); }
+                );
+            }
         }
     );
 }
 
+/**
+ * Poll a vandelay session tracker (vst) object by id until it is no
+ * longer in the 'active' state.
+ *
+ * @param {Number} id tracker id to retrieve via open-ils.pcrud
+ * @param {Function} onresponse called with the tracker object on each
+ *        poll while the tracker is still active (optional)
+ * @param {Function} oncomplete called once, with no arguments, when the
+ *        tracker is no longer active or can no longer be retrieved
+ *        (optional)
+ *
+ * The first poll fires after 1 second; subsequent polls every 2 seconds.
+ * Relies on the global 'authtoken' for the pcrud call.
+ */
+function pollSessionTracker(id, onresponse, oncomplete) {
+
+    function pollTrackerOnce() {
+        // Deliberately not an authoritative API call; the assumption
+        // is that the session will complete eventually regardless.
+
+        fieldmapper.standardRequest(
+            ['open-ils.pcrud', 'open-ils.pcrud.retrieve.vst'], {   
+            params: [authtoken, id],
+            async: true,
+            oncomplete: function(r) {
+                var tracker = openils.Util.readResponse(r);
+                if (tracker && tracker.state() === 'active') {
+                    if (onresponse) {
+                        onresponse(tracker);
+                    }
+                    // still running; schedule the next poll
+                    setTimeout(pollTrackerOnce, 2000);
+                } else {
+                    // tracker is no longer active, session is complete.
+                    if (oncomplete) {
+                        oncomplete();
+                    }
+                }
+            }
+        });
+    }
+    
+    setTimeout(pollTrackerOnce, 1000);
+}
+
 function vlExportInit() {
 
     // queue export
@@ -1393,11 +1434,15 @@ function vlImportAllRecords() {
 }
 
 /* if recList has values, import only those records */
-function vlImportRecordQueue(type, queueId, recList, onload) {
+function vlImportRecordQueue(type, queueId, recList, onload, sessionKey) {
     displayGlobalDiv('vl-generic-progress-with-total');
 
     /* set up options */
-    var options = {overlay_map : currentOverlayRecordsMap};
+    var options = {
+        overlay_map : currentOverlayRecordsMap, 
+        session_key: sessionKey, // link to upload session if possible
+        exit_early: true
+    };
 
     if(vlUploadQueueImportNoMatch.checked) {
         options.import_no_match = true;
@@ -1454,18 +1499,30 @@ function vlImportRecordQueue(type, queueId, recList, onload) {
     }
 
     fieldmapper.standardRequest(
-        ['open-ils.vandelay', method],
-        {   async: true,
-            params: params,
-            onresponse: function(r) {
-                var resp = r.recv().content();
-                if(e = openils.Event.parse(resp))
-                    return alert(e);
-                vlControlledProgressBar.update({maximum:resp.total, progress:resp.progress});
-            },
-            oncomplete: function() {onload();}
+        ['open-ils.vandelay', method], {
+        async: true, 
+        params: params,
+
+        // In exit_early mode, the API returns quickly with the
+        // tracker object.  Once received, poll the tracker until
+        // it's no longer active.
+        oncomplete : function(r) {
+            var tracker = r.recv().content();
+            if(e = openils.Event.parse(tracker)) 
+                return alert(e);
+
+            // Then poll for tracker updates.
+            pollSessionTracker(tracker.id(),
+                function(tracker) {
+                    vlControlledProgressBar.update({
+                        maximum: tracker.total_actions(),  
+                        progress: tracker.actions_performed()
+                    });
+                },
+                function() { onload(); }
+            );
         }
-    );
+    });
 }
 
 
@@ -1479,6 +1536,7 @@ function batchUpload() {
     // could be bib-acq, which makes no sense in most places
     if (currentType.match(/bib/)) currentType = 'bib';
 
+    var sessionKey;
     var handleProcessSpool = function() {
         if( 
             vlUploadQueueImportNoMatch.checked || 
@@ -1492,7 +1550,8 @@ function batchUpload() {
                     null,
                     function() {
                         retrieveQueuedRecords(currentType, currentQueueId, handleRetrieveRecords);
-                    }
+                    },
+                    sessionKey
                 );
         } else {
             retrieveQueuedRecords(currentType, currentQueueId, handleRetrieveRecords);
@@ -1501,6 +1560,7 @@ function batchUpload() {
 
     var handleUploadMARC = function(key) {
         dojo.style(dojo.byId('vl-upload-status-processing'), 'display', 'block');
+        sessionKey = key;
         processSpool(key, currentQueueId, currentType, handleProcessSpool);
     };
 

-----------------------------------------------------------------------

Summary of changes:
 Open-ILS/examples/fm_IDL.xml                       |   44 +++++
 .../perlmods/lib/OpenILS/Application/Vandelay.pm   |  181 +++++++++++++++++---
 Open-ILS/src/sql/Pg/002.schema.config.sql          |    2 +-
 Open-ILS/src/sql/Pg/012.schema.vandelay.sql        |   52 ++++++
 Open-ILS/src/sql/Pg/999.functions.global.sql       |    4 +
 ...sql => 1126.schema.vandelay-state-tracking.sql} |  178 ++++++++++++-------
 Open-ILS/web/js/ui/default/vandelay/vandelay.js    |  102 +++++++++---
 .../Cataloging/async-vandelay.adoc                 |   27 +++
 8 files changed, 483 insertions(+), 107 deletions(-)
 copy Open-ILS/src/sql/Pg/upgrade/{0121.schema.acq_fund_transfer.sql => 1126.schema.vandelay-state-tracking.sql} (81%)
 create mode 100644 docs/RELEASE_NOTES_NEXT/Cataloging/async-vandelay.adoc


hooks/post-receive
-- 
Evergreen ILS


More information about the open-ils-commits mailing list