Add the ability to update the index for a list of files from the GUI, and use it to let the user update the index before accessing a file for which the index data is stale
parent 6760051652
commit 6b24c3df6b
@@ -149,6 +149,38 @@ bool ConfIndexer::indexFiles(std::list<string>& ifiles, IxFlag flag)
     return ret;
 }
 
+// Update index for specific documents. The docs come from an index
+// query, so the udi, backend etc. fields are filled.
+bool ConfIndexer::updateDocs(std::vector<Rcl::Doc> &docs, IxFlag flag)
+{
+    list<string> files;
+    for (vector<Rcl::Doc>::iterator it = docs.begin(); it != docs.end(); it++) {
+        Rcl::Doc &idoc = *it;
+        string backend;
+        idoc.getmeta(Rcl::Doc::keybcknd, &backend);
+
+        // This only makes sense for file system files: beagle docs are
+        // always up to date because they can't be updated in the cache,
+        // only added/removed. Same remark as made inside internfile, we
+        // need a generic way to handle backends.
+        if (!backend.empty() && backend.compare("FS"))
+            continue;
+
+        // Filesystem document. Intern from file.
+        // The url has to be like file://
+        if (idoc.url.find(cstr_fileu) != 0) {
+            LOGERR(("idx::updateDocs: FS backend and non fs url: [%s]\n",
+                    idoc.url.c_str()));
+            continue;
+        }
+        files.push_back(idoc.url.substr(7, string::npos));
+    }
+    if (!files.empty()) {
+        return indexFiles(files, flag);
+    }
+    return true;
+}
+
 bool ConfIndexer::purgeFiles(std::list<string> &files)
 {
     list<string> myfiles;
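
For context, a minimal sketch of how a caller outside the GUI might drive the new updateDocs() entry point with documents coming out of a query. The refreshFromResults() helper name and the null second constructor argument are assumptions for illustration, not part of this commit:

#include <vector>

#include "rclconfig.h"
#include "indexer.h"
#include "rcldoc.h"

// Hypothetical helper: refresh the index entries for documents returned
// by a query. Only docs with an FS backend and a file:// url are actually
// reindexed; updateDocs() skips the others.
bool refreshFromResults(RclConfig *config, std::vector<Rcl::Doc>& results)
{
    // Assumption: a null status updater is acceptable when no progress
    // reporting is needed (the GUI passes its indexing thread instead).
    ConfIndexer indexer(config, 0);
    return indexer.updateDocs(results);
}
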
@@ -29,6 +29,7 @@ using std::map;
 
 #include "rclconfig.h"
 #include "rcldb.h"
+#include "rcldoc.h"
 
 class FsIndexer;
 class BeagleQueueIndexer;
@@ -96,6 +97,10 @@ class ConfIndexer {
     /** Index a list of files. No db cleaning or stemdb updating */
     bool indexFiles(std::list<string> &files, IxFlag f = IxFNone);
 
+    /** Update index for a list of documents given as docs (out of a query)
+     */
+    bool updateDocs(std::vector<Rcl::Doc> &docs, IxFlag f = IxFNone);
+
     /** Purge a list of files. */
     bool purgeFiles(std::list<string> &files);
 
@@ -34,6 +34,8 @@
 static int stopindexing;
 static int startindexing;
 static bool rezero;
+static vector<Rcl::Doc> idxdocs; // Docs to update. Update all if empty
 
 static IdxThreadStatus indexingstatus = IDXTS_OK;
 static string indexingReason;
 static int stopidxthread;
@@ -99,7 +101,11 @@ void IdxThread::run()
         } else {
             pidfile.write_pid();
             ConfIndexer *indexer = new ConfIndexer(myconf, this);
-            if (indexer->index(rezero, ConfIndexer::IxTAll)) {
+            bool status = idxdocs.empty() ?
+                indexer->index(rezero, ConfIndexer::IxTAll) :
+                indexer->updateDocs(idxdocs);
+
+            if (status) {
                 indexingstatus = IDXTS_OK;
                 indexingReason = "";
             } else {
@@ -152,11 +158,12 @@ void stop_indexing()
     action_wait.wakeAll();
 }
 
-void start_indexing(bool raz)
+void start_indexing(bool raz, vector<Rcl::Doc> docs)
 {
     action_mutex.lock();
     startindexing = 1;
     rezero = raz;
+    idxdocs = docs;
     action_mutex.unlock();
     action_wait.wakeAll();
 }
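
The handoff between the GUI and the indexing thread boils down to copying the requested document list under the action mutex and letting the thread pick between a full pass and a partial update. Below is a condensed, self-contained sketch of that pattern; it uses std::mutex and std::condition_variable in place of the Qt primitives (action_mutex, action_wait) used by the real code, so the names and types are illustrative only:

#include <condition_variable>
#include <mutex>
#include <vector>

struct Doc {};                        // stand-in for Rcl::Doc

static std::mutex action_mutex;
static std::condition_variable action_wait;
static bool startindexing = false;
static bool rezero = false;
static std::vector<Doc> idxdocs;      // docs to update; update everything if empty

// GUI side: request a full pass (docs empty) or a partial update.
void start_indexing(bool raz, std::vector<Doc> docs = std::vector<Doc>())
{
    std::lock_guard<std::mutex> lock(action_mutex);
    startindexing = true;
    rezero = raz;
    idxdocs = docs;
    action_wait.notify_all();
}

// Indexing thread: wait for a request, then choose the code path.
void indexing_thread_once()
{
    std::unique_lock<std::mutex> lock(action_mutex);
    action_wait.wait(lock, [] { return startindexing; });
    startindexing = false;
    std::vector<Doc> docs;
    docs.swap(idxdocs);               // take ownership of the request
    lock.unlock();

    if (docs.empty()) {
        // indexer->index(rezero, ConfIndexer::IxTAll);   // full indexing pass
    } else {
        // indexer->updateDocs(docs);                     // partial update
    }
}
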
@@ -18,6 +18,7 @@
 #define _IDXTHREAD_H_INCLUDED_
 #include <string>
 #include "indexer.h"
+#include "rcldoc.h"
 
 // These two deal with starting / stopping the thread itself, not
 // indexing sessions.
@@ -27,7 +28,8 @@ extern void start_idxthread();
 extern void stop_idxthread();
 
 // Use these to request action from thread
-extern void start_indexing(bool rezero = false);
+extern void start_indexing(bool rezero = false,
+                           const vector<Rcl::Doc> docs = vector<Rcl::Doc>());
 extern void stop_indexing();
 
 // Final status of indexing. indexingstatus is NULL iff indexing is
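
Because the extra parameter has a default value, existing callers of start_indexing() compile unchanged, and a partial update simply passes the documents explicitly, as the GUI change below does:

// Existing usage keeps working: request a full indexing pass, rebuilding from scratch.
start_indexing(true);

// New usage: update the index for a single document taken from a query result.
start_indexing(false, vector<Rcl::Doc>(1, doc));
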
@@ -950,19 +950,38 @@ void RclMain::startPreview(int docnum, Rcl::Doc doc, int mod)
     // empty as this does not appear to be a serious issue for single
     // docs (the main actual problem is displaying the wrong message
     // from a compacted mail folder)
+    //
+    // !! NOTE: there is one case where doing a partial index update
+    // will not work: if the search result does not exist in the new
+    // version of the file, it won't be purged from the index because
+    // a partial index pass does no purge, so its ref date will stay
+    // the same and you keep getting the message about the index being
+    // out of date. The only way to fix this is to run a normal
+    // indexing pass.
+    // Also we should re-run the query after updating the index
+    // because the ipaths may be wrong in the current result list
     if (!doc.ipath.empty()) {
         string udi, sig;
         doc.getmeta(Rcl::Doc::keyudi, &udi);
         FileInterner::makesig(doc, sig);
         if (rcldb && !udi.empty()) {
             if (rcldb->needUpdate(udi, sig)) {
-                fprintf(stderr, "AFTER UPDATE CHECK-1\n");
-                QMessageBox::warning(0, tr("Warning"),
-                                     tr("Index not up to date for this file. "
-                                        "Refusing to risk showing the wrong "
-                                        "data. Please run indexing"),
-                                     QMessageBox::Ok,
-                                     QMessageBox::NoButton);
+                int rep =
+                    QMessageBox::warning(0, tr("Warning"),
+                                         tr("Index not up to date for this file. "
+                                            "Refusing to risk showing the wrong "
+                                            "data. Click ok to update the "
+                                            "index for this file, then re-run the "
+                                            "query when indexing is done. "
+                                            "Else, Cancel."),
+                                         QMessageBox::Ok,
+                                         QMessageBox::Cancel,
+                                         QMessageBox::NoButton);
+                if (rep == QMessageBox::Ok) {
+                    LOGDEB(("Requesting index update for %s\n",
+                            doc.url.c_str()));
+                    start_indexing(false, vector<Rcl::Doc>(1, doc));
+                }
                 return;
             }
         }
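
Taken together, the GUI-side logic added above amounts to the flow sketched below, condensed into a hypothetical checkAndRequestUpdate() helper (no such function exists in the tree); it relies only on the calls used in the hunk above, with QObject::tr standing in for the member tr():

// Hypothetical condensation of the stale-data path (illustration only).
bool checkAndRequestUpdate(Rcl::Db *rcldb, Rcl::Doc doc)
{
    std::string udi, sig;
    doc.getmeta(Rcl::Doc::keyudi, &udi);
    FileInterner::makesig(doc, sig);

    // Index entry still matches the file on disk: safe to show the data.
    if (!rcldb || udi.empty() || !rcldb->needUpdate(udi, sig))
        return true;

    // Stale entry: offer to reindex just this file in the background.
    int rep = QMessageBox::warning(0, QObject::tr("Warning"),
                                   QObject::tr("Index not up to date for this file."),
                                   QMessageBox::Ok, QMessageBox::Cancel,
                                   QMessageBox::NoButton);
    if (rep == QMessageBox::Ok)
        start_indexing(false, std::vector<Rcl::Doc>(1, doc));
    return false;    // caller should not display possibly stale data
}
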