From a1122c4e8a369678ae2562428912b18138e4a944 Mon Sep 17 00:00:00 2001 From: Jean-Francois Dockes Date: Sun, 24 Nov 2019 15:02:30 +0100 Subject: [PATCH] Fix format string used to generate/scan circache headers. Use _ not . as prefix for webqueue metadata files Fix log messages and indent --- src/filters/recoll-we-move-files.py | 2 +- src/index/webqueue.cpp | 271 ++++++++++++++-------------- src/utils/circache.cpp | 130 ++++++------- 3 files changed, 204 insertions(+), 199 deletions(-) diff --git a/src/filters/recoll-we-move-files.py b/src/filters/recoll-we-move-files.py index fb528c2d..10487d2b 100755 --- a/src/filters/recoll-we-move-files.py +++ b/src/filters/recoll-we-move-files.py @@ -145,6 +145,6 @@ for hash in cfiles.keys(): shutil.move(os.path.join(downloadsdir, cfiles[hash]), os.path.join(webqueuedir, newname)) shutil.move(os.path.join(downloadsdir, mfiles[hash]), - os.path.join(webqueuedir, "." + newname)) + os.path.join(webqueuedir, "_" + newname)) diff --git a/src/index/webqueue.cpp b/src/index/webqueue.cpp index e22097fd..a31fc4f0 100644 --- a/src/index/webqueue.cpp +++ b/src/index/webqueue.cpp @@ -53,118 +53,118 @@ class WebQueueDotFile { public: WebQueueDotFile(RclConfig *conf, const string& fn) : m_conf(conf), m_fn(fn) - {} + {} // Read input line, strip it of eol and return as c++ string bool readLine(string& line) - { - static const int LL = 2048; - char cline[LL]; - cline[0] = 0; - m_input.getline(cline, LL-1); - if (!m_input.good()) { - if (m_input.bad()) { - LOGERR("WebQueueDotFileRead: input.bad()\n" ); + { + static const int LL = 2048; + char cline[LL]; + cline[0] = 0; + m_input.getline(cline, LL-1); + if (!m_input.good()) { + if (m_input.bad()) { + LOGERR("WebQueueDotFileRead: input.bad()\n"); + } + return false; } - return false; + int ll = strlen(cline); + while (ll > 0 && (cline[ll-1] == '\n' || cline[ll-1] == '\r')) { + cline[ll-1] = 0; + ll--; + } + line.assign(cline, ll); + LOGDEB2("WebQueueDotFile:readLine: [" << line << "]\n"); + return true; } - int ll = strlen(cline); - while (ll > 0 && (cline[ll-1] == '\n' || cline[ll-1] == '\r')) { - cline[ll-1] = 0; - ll--; - } - line.assign(cline, ll); - LOGDEB2("WebQueueDotFile:readLine: [" << (line) << "]\n" ); - return true; - } // Process a Web queue dot file and set interesting stuff in the doc bool toDoc(Rcl::Doc& doc) - { - string line; + { + string line; - m_input.open(m_fn.c_str(), ios::in); - if (!m_input.good()) { - LOGERR("WebQueueDotFile: open failed for [" << (m_fn) << "]\n" ); - return false; - } - - // Read the 3 first lines: - // - url - // - hit type: we only know about Bookmark and WebHistory for now - // - content-type. - if (!readLine(line)) - return false; - doc.url = line; - if (!readLine(line)) - return false; - doc.meta[Rcl::Doc::keybght] = line; - if (!readLine(line)) - return false; - doc.mimetype = line; - - // We set the bookmarks mtype as html (the text is empty - // anyway), so that the html viewer will be called on 'Open' - bool isbookmark = false; - if (!stringlowercmp("bookmark", doc.meta[Rcl::Doc::keybght])) { - isbookmark = true; - doc.mimetype = "text/html"; - } - - string confstr; - string ss(" "); - // Read the rest: fields and keywords. 
We do a little - // massaging of the input lines, then use a ConfSimple to - // parse, and finally insert the key/value pairs into the doc - // meta[] array - for (;;) { - if (!readLine(line)) { - // Eof hopefully - break; + m_input.open(m_fn.c_str(), ios::in); + if (!m_input.good()) { + LOGERR("WebQueueDotFile: open failed for [" << m_fn << "]\n"); + return false; } - if (line.find("t:") != 0) - continue; - line = line.substr(2); - confstr += line + "\n"; - } - ConfSimple fields(confstr, 1); - vector names = fields.getNames(cstr_null); - for (vector::iterator it = names.begin(); - it != names.end(); it++) { - string value; - fields.get(*it, value, cstr_null); - if (!value.compare("undefined") || !value.compare("null")) - continue; - string *valuep = &value; - string cvalue; - if (isbookmark) { - // It appears that bookmarks are stored in the users' - // locale charset (not too sure). No idea what to do - // for other types, would have to check the plugin. - string charset = m_conf->getDefCharset(true); - transcode(value, cvalue, charset, "UTF-8"); - valuep = &cvalue; + // Read the 3 first lines: + // - url + // - hit type: we only know about Bookmark and WebHistory for now + // - content-type. + if (!readLine(line)) + return false; + doc.url = line; + if (!readLine(line)) + return false; + doc.meta[Rcl::Doc::keybght] = line; + if (!readLine(line)) + return false; + doc.mimetype = line; + + // We set the bookmarks mtype as html (the text is empty + // anyway), so that the html viewer will be called on 'Open' + bool isbookmark = false; + if (!stringlowercmp("bookmark", doc.meta[Rcl::Doc::keybght])) { + isbookmark = true; + doc.mimetype = "text/html"; } + + string confstr; + string ss(" "); + // Read the rest: fields and keywords. We do a little + // massaging of the input lines, then use a ConfSimple to + // parse, and finally insert the key/value pairs into the doc + // meta[] array + for (;;) { + if (!readLine(line)) { + // Eof hopefully + break; + } + if (line.find("t:") != 0) + continue; + line = line.substr(2); + confstr += line + "\n"; + } + ConfSimple fields(confstr, 1); + vector names = fields.getNames(cstr_null); + for (vector::iterator it = names.begin(); + it != names.end(); it++) { + string value; + fields.get(*it, value, cstr_null); + if (!value.compare("undefined") || !value.compare("null")) + continue; + + string *valuep = &value; + string cvalue; + if (isbookmark) { + // It appears that bookmarks are stored in the users' + // locale charset (not too sure). No idea what to do + // for other types, would have to check the plugin. + string charset = m_conf->getDefCharset(true); + transcode(value, cvalue, charset, "UTF-8"); + valuep = &cvalue; + } - string caname = m_conf->fieldCanon(*it); - doc.meta[caname].append(ss + *valuep); - } + string caname = m_conf->fieldCanon(*it); + doc.meta[caname].append(ss + *valuep); + } - // Finally build the confsimple that we will save to the - // cache, from the doc fields. This could also be done in - // parallel with the doc.meta build above, but simpler this - // way. We need it because not all interesting doc fields are - // in the meta array (ie: mimetype, url), and we want - // something homogenous and easy to save. - for (const auto& entry : doc.meta) { - m_fields.set(entry.first, entry.second, cstr_null); - } - m_fields.set(cstr_url, doc.url, cstr_null); - m_fields.set(cstr_bgc_mimetype, doc.mimetype, cstr_null); + // Finally build the confsimple that we will save to the + // cache, from the doc fields. 
This could also be done in + // parallel with the doc.meta build above, but simpler this + // way. We need it because not all interesting doc fields are + // in the meta array (ie: mimetype, url), and we want + // something homogenous and easy to save. + for (const auto& entry : doc.meta) { + m_fields.set(entry.first, entry.second, cstr_null); + } + m_fields.set(cstr_url, doc.url, cstr_null); + m_fields.set(cstr_bgc_mimetype, doc.mimetype, cstr_null); - return true; - } + return true; + } RclConfig *m_conf; ConfSimple m_fields; @@ -175,7 +175,7 @@ public: // Initialize. Compute paths and create a temporary directory that will be // used by internfile() WebQueueIndexer::WebQueueIndexer(RclConfig *cnf, Rcl::Db *db, - DbIxStatusUpdater *updfunc) + DbIxStatusUpdater *updfunc) : m_config(cnf), m_db(db), m_cache(0), m_updater(updfunc), m_nocacheindex(false) { @@ -186,7 +186,7 @@ WebQueueIndexer::WebQueueIndexer(RclConfig *cnf, Rcl::Db *db, WebQueueIndexer::~WebQueueIndexer() { - LOGDEB("WebQueueIndexer::~\n" ); + LOGDEB("WebQueueIndexer::~\n"); deleteZ(m_cache); } @@ -203,12 +203,12 @@ bool WebQueueIndexer::indexFromCache(const string& udi) string hittype; if (!m_cache || !m_cache->getFromCache(udi, dotdoc, data, &hittype)) { - LOGERR("WebQueueIndexer::indexFromCache: cache failed\n" ); + LOGERR("WebQueueIndexer::indexFromCache: cache failed\n"); return false; } if (hittype.empty()) { - LOGERR("WebQueueIndexer::index: cc entry has no hit type\n" ); + LOGERR("WebQueueIndexer::index: cc entry has no hit type\n"); return false; } @@ -219,17 +219,17 @@ bool WebQueueIndexer::indexFromCache(const string& udi) } else { Rcl::Doc doc; FileInterner interner(data, m_config, - FileInterner::FIF_doUseInputMimetype, + FileInterner::FIF_doUseInputMimetype, dotdoc.mimetype); FileInterner::Status fis; try { fis = interner.internfile(doc); } catch (CancelExcept) { - LOGERR("WebQueueIndexer: interrupted\n" ); + LOGERR("WebQueueIndexer: interrupted\n"); return false; } if (fis != FileInterner::FIDone) { - LOGERR("WebQueueIndexer: bad status from internfile\n" ); + LOGERR("WebQueueIndexer: bad status from internfile\n"); return false; } @@ -258,14 +258,14 @@ bool WebQueueIndexer::index() { if (!m_db) return false; - LOGDEB("WebQueueIndexer::processqueue: [" << (m_queuedir) << "]\n" ); + LOGDEB("WebQueueIndexer::processqueue: [" << m_queuedir << "]\n"); m_config->setKeyDir(m_queuedir); if (!path_makepath(m_queuedir, 0700)) { - LOGERR("WebQueueIndexer:: can't create queuedir [" << (m_queuedir) << "] errno " << (errno) << "\n" ); - return false; + LOGERR("WebQueueIndexer:: can't create queuedir [" << m_queuedir << "] errno " << errno << "\n"); + return false; } if (!m_cache || !m_cache->cc()) { - LOGERR("WebQueueIndexer: cache initialization failed\n" ); + LOGERR("WebQueueIndexer: cache initialization failed\n"); return false; } CirCache *cc = m_cache->cc(); @@ -279,11 +279,11 @@ bool WebQueueIndexer::index() if (!eof) return false; } - int nentries = 0; + int nentries = 0; do { string udi; if (!cc->getCurrentUdi(udi)) { - LOGERR("WebQueueIndexer:: cache file damaged\n" ); + LOGERR("WebQueueIndexer:: cache file damaged\n"); break; } if (udi.empty()) @@ -296,11 +296,11 @@ bool WebQueueIndexer::index() indexFromCache(udi); updstatus(udi); } catch (CancelExcept) { - LOGERR("WebQueueIndexer: interrupted\n" ); + LOGERR("WebQueueIndexer: interrupted\n"); return false; } } - nentries++; + nentries++; } while (cc->next(eof)); } @@ -308,17 +308,17 @@ bool WebQueueIndexer::index() FsTreeWalker walker(FsTreeWalker::FtwNoRecurse); 
walker.addSkippedName(".*"); FsTreeWalker::Status status = walker.walk(m_queuedir, *this); - LOGDEB("WebQueueIndexer::processqueue: done: status " << (status) << "\n" ); + LOGDEB("WebQueueIndexer::processqueue: done: status " << status << "\n"); return true; } // Index a list of files (sent by the real time monitor) bool WebQueueIndexer::indexFiles(list& files) { - LOGDEB("WebQueueIndexer::indexFiles\n" ); + LOGDEB("WebQueueIndexer::indexFiles\n"); if (!m_db) { - LOGERR("WebQueueIndexer::indexfiles no db??\n" ); + LOGERR("WebQueueIndexer::indexfiles no db??\n"); return false; } for (list::iterator it = files.begin(); it != files.end();) { @@ -327,7 +327,7 @@ bool WebQueueIndexer::indexFiles(list& files) } string father = path_getfather(*it); if (father.compare(m_queuedir)) { - LOGDEB("WebQueueIndexer::indexfiles: skipping [" << *it << "] (nq)\n" ); + LOGDEB("WebQueueIndexer::indexfiles: skipping [" << *it << "] (nq)\n"); it++; continue; } // Pb: we are often called with the dot file, before the @@ -343,13 +343,13 @@ bool WebQueueIndexer::indexFiles(list& files) } struct stat st; if (path_fileprops(*it, &st) != 0) { - LOGERR("WebQueueIndexer::indexfiles: cant stat [" << *it << "]\n" ); + LOGERR("WebQueueIndexer::indexfiles: cant stat [" << *it << "]\n"); it++; continue; } - if (!S_ISREG(st.st_mode)) { - LOGDEB("WebQueueIndexer::indexfiles: skipping [" << *it << "] (nr)\n" ); + if (!S_ISREG(st.st_mode)) { + LOGDEB("WebQueueIndexer::indexfiles: skipping [" << *it << "] (nr)\n"); it++; continue; - } + } processone(*it, &st, FsTreeWalker::FtwRegular); it = files.erase(it); @@ -362,8 +362,8 @@ bool WebQueueIndexer::indexFiles(list& files) FsTreeWalker::Status WebQueueIndexer::processone(const string &path, - const struct stat *stp, - FsTreeWalker::CbFlag flg) + const struct stat *stp, + FsTreeWalker::CbFlag flg) { if (!m_db) //?? return FsTreeWalker::FtwError; @@ -374,8 +374,8 @@ WebQueueIndexer::processone(const string &path, return FsTreeWalker::FtwOk; string dotpath = path_cat(path_getfather(path), - string(".") + path_getsimple(path)); - LOGDEB("WebQueueIndexer: prc1: [" << (path) << "]\n" ); + string("_") + path_getsimple(path)); + LOGDEB("WebQueueIndexer: prc1: [" << path << "]\n"); WebQueueDotFile dotfile(m_config, dotpath); Rcl::Doc dotdoc; @@ -389,7 +389,7 @@ WebQueueIndexer::processone(const string &path, udipath = path_cat(dotdoc.meta[Rcl::Doc::keybght], url_gpath(dotdoc.url)); make_udi(udipath, cstr_null, udi); - LOGDEB("WebQueueIndexer: prc1: udi [" << (udi) << "]\n" ); + LOGDEB("WebQueueIndexer: prc1: udi [" << udi << "]\n"); char ascdate[30]; sprintf(ascdate, "%ld", long(stp->st_mtime)); @@ -421,11 +421,11 @@ WebQueueIndexer::processone(const string &path, try { fis = interner.internfile(doc); } catch (CancelExcept) { - LOGERR("WebQueueIndexer: interrupted\n" ); + LOGERR("WebQueueIndexer: interrupted\n"); goto out; } if (fis != FileInterner::FIDone && fis != FileInterner::FIAgain) { - LOGERR("WebQueueIndexer: bad status from internfile\n" ); + LOGERR("WebQueueIndexer: bad status from internfile\n"); // TOBEDONE: internfile can return FIAgain here if it is // paging a big text file, we should loop. Means we're // only indexing the first page for text/plain files @@ -451,18 +451,18 @@ WebQueueIndexer::processone(const string &path, { // doc fields not in meta, needing saving to the cache dotfile.m_fields.set("fmtime", dotdoc.fmtime, cstr_null); - // fbytes is used for historical reasons, should be pcbytes, but makes - // no sense to change. 
+ // fbytes is used for historical reasons, should be pcbytes, but makes + // no sense to change. dotfile.m_fields.set(cstr_fbytes, dotdoc.pcbytes, cstr_null); dotfile.m_fields.set("udi", udi, cstr_null); string fdata; file_to_string(path, fdata); if (!m_cache || !m_cache->cc()) { - LOGERR("WebQueueIndexer: cache initialization failed\n" ); + LOGERR("WebQueueIndexer: cache initialization failed\n"); goto out; } if (!m_cache->cc()->put(udi, &dotfile.m_fields, fdata, 0)) { - LOGERR("WebQueueIndexer::prc1: cache_put failed; " << (m_cache->cc()->getReason()) << "\n" ); + LOGERR("WebQueueIndexer::prc1: cache_put failed; " << m_cache->cc()->getReason() << "\n"); goto out; } } @@ -470,9 +470,12 @@ WebQueueIndexer::processone(const string &path, dounlink = true; out: if (dounlink) { - unlink(path.c_str()); - unlink(dotpath.c_str()); + if (unlink(path.c_str())) { + LOGSYSERR("WebQueueIndexer::processone", "unlink", path); + } + if (unlink(dotpath.c_str())) { + LOGSYSERR("WebQueueIndexer::processone", "unlink", dotpath); + } } return FsTreeWalker::FtwOk; } - diff --git a/src/utils/circache.cpp b/src/utils/circache.cpp index c612f4c5..371c307f 100644 --- a/src/utils/circache.cpp +++ b/src/utils/circache.cpp @@ -29,6 +29,7 @@ #include "safeunistd.h" #include #include +#include #include @@ -72,9 +73,6 @@ static ssize_t writev(int fd, const struct iovec *iov, int iovcnt) #include "md5.h" using namespace std; -typedef unsigned char UCHAR; -typedef unsigned int UINT; -typedef unsigned long ULONG; /** Temp buffer with automatic deallocation */ struct TempBuf { @@ -125,17 +123,17 @@ struct TempBuf { #define CIRCACHE_FIRSTBLOCK_SIZE 1024 // Entry header. -// 3 x 32 bits sizes as hex integers + 1 x 16 bits flag + at least 1 zero -// 15 + 3x(9) + 3 + 1 = 46 -static const char *headerformat = "circacheSizes = %x %x %x %hx"; +// 2x32 1x64 bits ints as hex integers + 1 x 16 bits flag + at least 1 zero +// 15 + 2x9 + 17 + 3 + 1 = 54 +static const char *headerformat = "circacheSizes = %x %x %llx %hx"; #define CIRCACHE_HEADER_SIZE 64 class EntryHeaderData { public: EntryHeaderData() : dicsize(0), datasize(0), padsize(0), flags(0) {} - UINT dicsize; - UINT datasize; - UINT padsize; + unsigned int dicsize; + unsigned int datasize; + uint64_t padsize; unsigned short flags; }; enum EntryFlags {EFNone = 0, EFDataCompressed = 1}; @@ -158,13 +156,13 @@ public: #define UDIHLEN 4 class UdiH { public: - UCHAR h[UDIHLEN]; + unsigned char h[UDIHLEN]; UdiH(const string& udi) { MD5_CTX ctx; MD5Init(&ctx); - MD5Update(&ctx, (const UCHAR*)udi.c_str(), udi.length()); - UCHAR md[16]; + MD5Update(&ctx, (const unsigned char*)udi.c_str(), udi.length()); + unsigned char md[16]; MD5Final(md, &ctx); memcpy(h, md, UDIHLEN); } @@ -237,28 +235,28 @@ public: bool khEnter(const string& udi, int64_t ofs) { UdiH h(udi); - LOGDEB2("Circache::khEnter: h " << (h.asHexString()) << " offs " << ((ULONG)ofs) << " udi [" << (udi) << "]\n" ); + LOGDEB2("Circache::khEnter: h " << h.asHexString() << " offs " << ofs << " udi [" << udi << "]\n"); pair p = m_ofskh.equal_range(h); if (p.first != m_ofskh.end() && p.first->first == h) { for (kh_type::iterator it = p.first; it != p.second; it++) { - LOGDEB2("Circache::khEnter: col h " << (it->first.asHexString()) << ", ofs " << ((ULONG)it->second) << "\n" ); + LOGDEB2("Circache::khEnter: col h " << it->first.asHexString() << ", ofs " << it->second << "\n"); if (it->second == ofs) { // (h,offs) already there. 
Happens - LOGDEB2("Circache::khEnter: already there\n" ); + LOGDEB2("Circache::khEnter: already there\n"); return true; } } } m_ofskh.insert(kh_value_type(h, ofs)); - LOGDEB2("Circache::khEnter: inserted\n" ); + LOGDEB2("Circache::khEnter: inserted\n"); return true; } void khDump() { for (kh_type::const_iterator it = m_ofskh.begin(); it != m_ofskh.end(); it++) { - LOGDEB("Circache::KHDUMP: " << (it->first.asHexString()) << " " << ((ULONG)it->second) << "\n" ); + LOGDEB("Circache::KHDUMP: " << it->first.asHexString() << " " << it->second << "\n"); } } @@ -270,19 +268,19 @@ public: UdiH h(udi); - LOGDEB2("Circache::khFind: h " << (h.asHexString()) << " udi [" << (udi) << "]\n" ); + LOGDEB2("Circache::khFind: h " << h.asHexString() << " udi [" << udi << "]\n"); pair p = m_ofskh.equal_range(h); #if 0 if (p.first == m_ofskh.end()) { - LOGDEB("KHFIND: FIRST END()\n" ); + LOGDEB("KHFIND: FIRST END()\n"); } if (p.second == m_ofskh.end()) { - LOGDEB("KHFIND: SECOND END()\n" ); + LOGDEB("KHFIND: SECOND END()\n"); } if (!(p.first->first == h)) - LOGDEB("KHFIND: NOKEY: " << (p.first->first.asHexString()) << " " << (p.second->first.asHexString()) << "\n" ); + LOGDEB("KHFIND: NOKEY: " << p.first->first.asHexString() << " " << p.second->first.asHexString() << "\n"); #endif if (p.first == m_ofskh.end() || !(p.first->first == h)) { @@ -496,7 +494,8 @@ public: offset << " [" << bf << "]"; return CCScanHook::Error; } - LOGDEB2("Circache:readEntryHeader: dcsz " << (d.dicsize) << " dtsz " << (d.datasize) << " pdsz " << (d.padsize) << " flgs " << (d.flags) << "\n" ); + LOGDEB2("Circache:readEntryHeader: dcsz " << d.dicsize << " dtsz " << d.datasize << " pdsz " << d.padsize << + " flgs " << d.flags << "\n"); return CCScanHook::Continue; } @@ -630,7 +629,7 @@ public: } if (hd.flags & EFDataCompressed) { - LOGDEB1("Circache:readdicdata: data compressed\n" ); + LOGDEB1("Circache:readdicdata: data compressed\n"); ZLibUtBuf buf; if (!inflateToBuf(bf, hd.datasize, buf)) { m_reason << "CirCache: decompression failed "; @@ -638,7 +637,7 @@ public: } data->assign(buf.getBuf(), buf.getCnt()); } else { - LOGDEB1("Circache:readdicdata: data NOT compressed\n" ); + LOGDEB1("Circache:readdicdata: data NOT compressed\n"); data->assign(bf, hd.datasize); } } else { @@ -653,7 +652,7 @@ CirCache::CirCache(const string& dir) : m_dir(dir) { m_d = new CirCacheInternal; - LOGDEB0("CirCache: [" << (m_dir) << "]\n" ); + LOGDEB0("CirCache: [" << m_dir << "]\n"); } CirCache::~CirCache() @@ -681,7 +680,7 @@ public: const EntryHeaderData& d) { headoffs = offs; padsize = d.padsize; - LOGDEB2("CCScanHookRecord::takeone: offs " << (lltodecstr(headoffs)) << " padsize " << (lltodecstr(padsize)) << "\n" ); + LOGDEB2("CCScanHookRecord::takeone: offs " << headoffs << " padsize " << padsize << "\n"); return Continue; } }; @@ -693,9 +692,9 @@ string CirCache::getpath() bool CirCache::create(int64_t maxsize, int flags) { - LOGDEB("CirCache::create: [" << (m_dir) << "] maxsz " << (lltodecstr((long long)maxsize)) << " flags 0x" << (flags) << "\n" ); + LOGDEB("CirCache::create: [" << m_dir << "] maxsz " << maxsize << " flags 0x" << std::hex << flags <m_maxsize && ((flags & CC_CRUNIQUE) != 0) == m_d->m_uniquentries) { - LOGDEB("Header unchanged, no rewrite\n" ); + LOGDEB("Header unchanged, no rewrite\n"); return true; } // If the new maxsize is bigger than current size, we need @@ -734,7 +733,9 @@ bool CirCache::create(int64_t maxsize, int flags) } m_d->m_maxsize = maxsize; m_d->m_uniquentries = ((flags & CC_CRUNIQUE) != 0); - LOGDEB2("CirCache::create: 
rewriting header with maxsize " << (lltodecstr(m_d->m_maxsize)) << " oheadoffs " << (lltodecstr(m_d->m_oheadoffs)) << " nheadoffs " << (lltodecstr(m_d->m_nheadoffs)) << " npadsize " << (m_d->m_npadsize) << " unient " << (int(m_d->m_uniquentries)) << "\n" ); + LOGDEB2("CirCache::create: rewriting header with maxsize " << m_d->m_maxsize << " oheadoffs " << + m_d->m_oheadoffs << " nheadoffs " << m_d->m_nheadoffs << " npadsize " << m_d->m_npadsize << + " unient " << m_d->m_uniquentries << "\n"); return m_d->writefirstblock(); } // Else fallthrough to create file @@ -756,7 +757,7 @@ bool CirCache::create(int64_t maxsize, int flags) if (::write(m_d->m_fd, buf, CIRCACHE_FIRSTBLOCK_SIZE) != CIRCACHE_FIRSTBLOCK_SIZE) { m_d->m_reason << "CirCache::create: write header failed, errno " - << errno; + << errno; return false; } return m_d->writefirstblock(); @@ -765,7 +766,7 @@ bool CirCache::create(int64_t maxsize, int flags) bool CirCache::open(OpMode mode) { if (m_d == 0) { - LOGERR("CirCache::open: null data\n" ); + LOGERR("CirCache::open: null data\n"); return false; } @@ -788,7 +789,7 @@ public: virtual status takeone(int64_t offs, const string& udi, const EntryHeaderData& d) { cout << "Scan: offs " << offs << " dicsize " << d.dicsize - << " datasize " << d.datasize << " padsize " << d.padsize << + << " datasize " << d.datasize << " padsize " << d.padsize << " flags " << d.flags << " udi [" << udi << "]" << endl; return Continue; @@ -836,7 +837,8 @@ public: virtual status takeone(int64_t offs, const string& udi, const EntryHeaderData& d) { - LOGDEB2("Circache:Scan: off " << (long(offs)) << " udi [" << (udi) << "] dcsz " << ((UINT)d.dicsize) << " dtsz " << ((UINT)d.datasize) << " pdsz " << ((UINT)d.padsize) << " flgs " << (d.flags) << "\n" ); + LOGDEB2("Circache:Scan: off " << offs << " udi [" << udi << "] dcsz " << d.dicsize << " dtsz " << d.datasize << + " pdsz " << d.padsize << " flgs " << d.flags << "\n"); if (!m_udi.compare(udi)) { m_instance++; m_offs = offs; @@ -858,21 +860,21 @@ bool CirCache::get(const string& udi, string& dic, string *data, int instance) return false; } - LOGDEB0("CirCache::get: udi [" << (udi) << "], instance " << (instance) << "\n" ); + LOGDEB0("CirCache::get: udi [" << udi << "], instance " << instance << "\n"); // If memory map is up to date, use it: if (m_d->m_ofskhcplt) { - LOGDEB1("CirCache::get: using ofskh\n" ); + LOGDEB1("CirCache::get: using ofskh\n"); //m_d->khDump(); vector ofss; if (m_d->khFind(udi, ofss)) { - LOGDEB1("Circache::get: h found, colls " << (ofss.size()) << "\n" ); + LOGDEB1("Circache::get: h found, colls " << ofss.size() << "\n"); int finst = 1; EntryHeaderData d_good; int64_t o_good = 0; for (vector::iterator it = ofss.begin(); it != ofss.end(); it++) { - LOGDEB1("Circache::get: trying offs " << ((ULONG)*it) << "\n" ); + LOGDEB1("Circache::get: trying offs " << *it << "\n"); EntryHeaderData d; string fudi; if (!m_d->readHUdi(*it, d, fudi)) { @@ -894,7 +896,7 @@ bool CirCache::get(const string& udi, string& dic, string *data, int instance) // Did we read an appropriate entry ? if (o_good != 0 && (instance == -1 || instance == finst)) { bool ret = m_d->readDicData(o_good, d_good, dic, data); - LOGDEB0("Circache::get: hfound, " << (chron.millis()) << " mS\n" ); + LOGDEB0("Circache::get: hfound, " << chron.millis() << " mS\n"); return ret; } // Else try to scan anyway. 
@@ -913,15 +915,14 @@ bool CirCache::get(const string& udi, string& dic, string *data, int instance) return false; } bool bret = m_d->readDicData(getter.m_offs, getter.m_hd, dic, data); - LOGDEB0("Circache::get: scanfound, " << (chron.millis()) << " mS\n" ); - + LOGDEB0("Circache::get: scanfound, " << chron.millis() << " mS\n"); return bret; } bool CirCache::erase(const string& udi, bool reallyclear) { if (m_d == 0) { - LOGERR("CirCache::erase: null data\n" ); + LOGERR("CirCache::erase: null data\n"); return false; } if (m_d->m_fd < 0) { @@ -929,7 +930,7 @@ bool CirCache::erase(const string& udi, bool reallyclear) return false; } - LOGDEB0("CirCache::erase: udi [" << (udi) << "]\n" ); + LOGDEB0("CirCache::erase: udi [" << udi << "]\n"); // If the mem cache is not up to date, update it, we're too lazy // to do a scan @@ -937,7 +938,7 @@ bool CirCache::erase(const string& udi, bool reallyclear) string dic; get("nosuchudi probably exists", dic); if (!m_d->m_ofskhcplt) { - LOGERR("CirCache::erase : cache not updated after get\n" ); + LOGERR("CirCache::erase : cache not updated after get\n"); return false; } } @@ -945,27 +946,27 @@ bool CirCache::erase(const string& udi, bool reallyclear) vector ofss; if (!m_d->khFind(udi, ofss)) { // Udi not in there, erase ok - LOGDEB("CirCache::erase: khFind returns none\n" ); + LOGDEB("CirCache::erase: khFind returns none\n"); return true; } for (vector::iterator it = ofss.begin(); it != ofss.end(); it++) { - LOGDEB2("CirCache::erase: reading at " << ((unsigned long)*it) << "\n" ); + LOGDEB2("CirCache::erase: reading at " << *it << "\n"); EntryHeaderData d; string fudi; if (!m_d->readHUdi(*it, d, fudi)) { return false; } - LOGDEB2("CirCache::erase: found fudi [" << (fudi) << "]\n" ); + LOGDEB2("CirCache::erase: found fudi [" << fudi << "]\n"); if (!fudi.compare(udi)) { EntryHeaderData nd; nd.padsize = d.dicsize + d.datasize + d.padsize; - LOGDEB2("CirCache::erase: rewrite at " << ((unsigned long)*it) << "\n" ); + LOGDEB2("CirCache::erase: rewrite at " << *it << "\n"); if (*it == m_d->m_nheadoffs) { m_d->m_npadsize = nd.padsize; } if (!m_d->writeEntryHeader(*it, nd, reallyclear)) { - LOGERR("CirCache::erase: write header failed\n" ); + LOGERR("CirCache::erase: write header failed\n"); return false; } } @@ -988,7 +989,8 @@ public: virtual status takeone(int64_t offs, const string& udi, const EntryHeaderData& d) { - LOGDEB2("Circache:ScanSpacer:off " << ((UINT)offs) << " dcsz " << (d.dicsize) << " dtsz " << (d.datasize) << " pdsz " << (d.padsize) << " udi[" << (udi) << "]\n" ); + LOGDEB2("Circache:ScanSpacer:off " << offs << " dcsz " << d.dicsize << " dtsz " << d.datasize << + " pdsz " << d.padsize << " udi[" << udi << "]\n"); sizeseen += CIRCACHE_HEADER_SIZE + d.dicsize + d.datasize + d.padsize; squashed_udis.push_back(make_pair(udi, offs)); if (sizeseen >= sizewanted) { @@ -1002,7 +1004,7 @@ bool CirCache::put(const string& udi, const ConfSimple *iconf, const string& data, unsigned int iflags) { if (m_d == 0) { - LOGERR("CirCache::put: null data\n" ); + LOGERR("CirCache::put: null data\n"); return false; } if (m_d->m_fd < 0) { @@ -1014,14 +1016,14 @@ bool CirCache::put(const string& udi, const ConfSimple *iconf, string dic; if (!iconf || !iconf->get("udi", dic) || dic.empty() || dic.compare(udi)) { m_d->m_reason << "No/bad 'udi' entry in input dic"; - LOGERR("Circache::put: no/bad udi: DIC:[" << (dic) << "] UDI [" << (udi) << "]\n" ); + LOGERR("Circache::put: no/bad udi: DIC:[" << dic << "] UDI [" << udi << "]\n"); return false; } // Possibly erase older 
entries. Need to do this first because we may be // able to reuse the space if the same udi was last written if (m_d->m_uniquentries && !erase(udi)) { - LOGERR("CirCache::put: can't erase older entries\n" ); + LOGERR("CirCache::put: can't erase older entries\n"); return false; } @@ -1058,7 +1060,7 @@ bool CirCache::put(const string& udi, const ConfSimple *iconf, int64_t npadsize = 0; bool extending = false; - LOGDEB("CirCache::put: nsz " << (nsize) << " oheadoffs " << (m_d->m_oheadoffs) << "\n" ); + LOGDEB("CirCache::put: nsz " << nsize << " oheadoffs " << m_d->m_oheadoffs << "\n"); // Check if we can recover some pad space from the (physically) previous // entry. @@ -1080,7 +1082,7 @@ bool CirCache::put(const string& udi, const ConfSimple *iconf, // the header, we're going to write on it. recovpadsize += CIRCACHE_HEADER_SIZE; } else { - LOGDEB("CirCache::put: recov. prev. padsize " << (pd.padsize) << "\n" ); + LOGDEB("CirCache::put: recov. prev. padsize " << pd.padsize << "\n"); pd.padsize = 0; if (!m_d->writeEntryHeader(m_d->m_nheadoffs, pd)) { return false; @@ -1093,7 +1095,7 @@ bool CirCache::put(const string& udi, const ConfSimple *iconf, if (nsize <= recovpadsize) { // If the new entry fits entirely in the pad area from the // latest one, no need to recycle stuff - LOGDEB("CirCache::put: new fits in old padsize " << (recovpadsize) << "\n" ); + LOGDEB("CirCache::put: new fits in old padsize " << recovpadsize << "\n"); npadsize = recovpadsize - nsize; } else if (st.st_size < m_d->m_maxsize) { // Still growing the file. @@ -1103,11 +1105,11 @@ bool CirCache::put(const string& udi, const ConfSimple *iconf, // Scan the file until we have enough space for the new entry, // and determine the pad size up to the 1st preserved entry int64_t scansize = nsize - recovpadsize; - LOGDEB("CirCache::put: scanning for size " << (scansize) << " from offs " << ((UINT)m_d->m_oheadoffs) << "\n" ); + LOGDEB("CirCache::put: scanning for size " << scansize << " from offs " << m_d->m_oheadoffs << "\n"); CCScanHookSpacer spacer(scansize); switch (m_d->scan(m_d->m_oheadoffs, &spacer)) { case CCScanHook::Stop: - LOGDEB("CirCache::put: Scan ok, sizeseen " << (spacer.sizeseen) << "\n" ); + LOGDEB("CirCache::put: Scan ok, sizeseen " << spacer.sizeseen << "\n"); npadsize = spacer.sizeseen - scansize; break; case CCScanHook::Eof: @@ -1122,7 +1124,7 @@ bool CirCache::put(const string& udi, const ConfSimple *iconf, m_d->khClear(spacer.squashed_udis); } - LOGDEB("CirCache::put: writing " << (nsize) << " at " << (nwriteoffs) << " padsize " << (npadsize) << "\n" ); + LOGDEB("CirCache::put: writing " << nsize << " at " << nwriteoffs << " padsize " << npadsize << "\n"); if (lseek(m_d->m_fd, nwriteoffs, 0) != nwriteoffs) { m_d->m_reason << "CirCache::put: lseek failed: " << errno; @@ -1166,7 +1168,7 @@ bool CirCache::put(const string& udi, const ConfSimple *iconf, bool CirCache::rewind(bool& eof) { if (m_d == 0) { - LOGERR("CirCache::rewind: null data\n" ); + LOGERR("CirCache::rewind: null data\n"); return false; } @@ -1174,7 +1176,7 @@ bool CirCache::rewind(bool& eof) int64_t fsize = lseek(m_d->m_fd, 0, SEEK_END); if (fsize == (int64_t) - 1) { - LOGERR("CirCache::rewind: seek to EOF failed\n" ); + LOGERR("CirCache::rewind: seek to EOF failed\n"); return false; } // Read oldest header. 
This is either at the position pointed to @@ -1201,7 +1203,7 @@ bool CirCache::rewind(bool& eof) bool CirCache::next(bool& eof) { if (m_d == 0) { - LOGERR("CirCache::next: null data\n" ); + LOGERR("CirCache::next: null data\n"); return false; } @@ -1238,7 +1240,7 @@ bool CirCache::next(bool& eof) bool CirCache::getCurrentUdi(string& udi) { if (m_d == 0) { - LOGERR("CirCache::getCurrentUdi: null data\n" ); + LOGERR("CirCache::getCurrentUdi: null data\n"); return false; } @@ -1251,7 +1253,7 @@ bool CirCache::getCurrentUdi(string& udi) bool CirCache::getCurrent(string& udi, string& dic, string *data) { if (m_d == 0) { - LOGERR("CirCache::getCurrent: null data\n" ); + LOGERR("CirCache::getCurrent: null data\n"); return false; } if (!m_d->readDicData(m_d->m_itoffs, m_d->m_ithd, dic, data)) {